diff --git a/.changeset/orange-books-rush.md b/.changeset/orange-books-rush.md
new file mode 100644
index 00000000..b234909f
--- /dev/null
+++ b/.changeset/orange-books-rush.md
@@ -0,0 +1,6 @@
+---
+"@jspsych-contrib/plugin-text-to-speech-button-reponse": major
+"@jspsych-contrib/plugin-text-to-speech-keyboard-response": major
+---
+
+Add text-to-speech plugins with button-response and keyboard-response variants
diff --git a/package-lock.json b/package-lock.json
index 0a97c4a4..a629bbb0 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -3498,6 +3498,14 @@
"resolved": "packages/plugin-survey-slider",
"link": true
},
+ "node_modules/@jspsych-contrib/plugin-text-to-speech-button-reponse": {
+ "resolved": "packages/plugin-text-to-speech-button-response",
+ "link": true
+ },
+ "node_modules/@jspsych-contrib/plugin-text-to-speech-keyboard-response": {
+ "resolved": "packages/plugin-text-to-speech-keyboard-response",
+ "link": true
+ },
"node_modules/@jspsych-contrib/plugin-video-several-keyboard-responses": {
"resolved": "packages/plugin-video-several-keyboard-responses",
"link": true
@@ -19088,6 +19096,61 @@
"jspsych": ">=7.0.0"
}
},
+ "packages/plugin-text-to-speech-button": {
+ "name": "@jspsych-contrib/plugin-text-to-speech-button",
+ "version": "0.0.1",
+ "extraneous": true,
+ "license": "MIT",
+ "devDependencies": {
+ "@jspsych/config": "^2.0.0",
+ "@jspsych/test-utils": "^1.0.0",
+ "@types/dom-speech-recognition": "^0.0.4",
+ "jspsych": "^7.0.0"
+ },
+ "peerDependencies": {
+ "jspsych": ">=7.0.0"
+ }
+ },
+ "packages/plugin-text-to-speech-button-response": {
+ "name": "@jspsych-contrib/plugin-text-to-speech-button-reponse",
+ "version": "0.0.1",
+ "license": "MIT",
+ "devDependencies": {
+ "@jspsych/config": "^2.0.0",
+ "@jspsych/test-utils": "^1.0.0",
+ "jspsych": "^7.0.0"
+ },
+ "peerDependencies": {
+ "jspsych": ">=7.0.0"
+ }
+ },
+ "packages/plugin-text-to-speech-keyboard": {
+ "name": "@jspsych-contrib/plugin-text-to-speech-keyboard",
+ "version": "0.0.1",
+ "extraneous": true,
+ "license": "MIT",
+ "devDependencies": {
+ "@jspsych/config": "^2.0.0",
+ "@jspsych/test-utils": "^1.0.0",
+ "jspsych": "^7.0.0"
+ },
+ "peerDependencies": {
+ "jspsych": ">=7.0.0"
+ }
+ },
+ "packages/plugin-text-to-speech-keyboard-response": {
+ "name": "@jspsych-contrib/plugin-text-to-speech-keyboard-response",
+ "version": "0.0.1",
+ "license": "MIT",
+ "devDependencies": {
+ "@jspsych/config": "^2.0.0",
+ "@jspsych/test-utils": "^1.0.0",
+ "jspsych": "^7.0.0"
+ },
+ "peerDependencies": {
+ "jspsych": ">=7.0.0"
+ }
+ },
"packages/plugin-video-several-keyboard-responses": {
"name": "@jspsych-contrib/plugin-video-several-keyboard-responses",
"version": "2.0.0",
diff --git a/packages/plugin-text-to-speech-button-response/README.md b/packages/plugin-text-to-speech-button-response/README.md
new file mode 100644
index 00000000..73220675
--- /dev/null
+++ b/packages/plugin-text-to-speech-button-response/README.md
@@ -0,0 +1,35 @@
+# text-to-speech-button-response
+
+## Overview
+
+Displays text, reads it aloud to the participant using the SpeechSynthesis API, and collects responses via buttons.
+
+## Loading
+
+### In browser
+
+```js
+<script src="https://unpkg.com/@jspsych-contrib/plugin-text-to-speech-button-response"></script>
+```
+
+### Via NPM
+
+```
+npm install @jspsych-contrib/plugin-text-to-speech-button-response
+```
+
+```js
+import jsPsychTextToSpeechButtonResponse from '@jspsych-contrib/plugin-text-to-speech-button-response';
+```
+
+## Compatibility
+
+jsPsych 7.0.0 or later
+
+## Documentation
+
+See [documentation](https://github.com/jspsych/jspsych-contrib/blob/main/packages/plugin-text-to-speech-button-response/docs/text-to-speech-button-response.md)
+
+## Author / Citation
+
+[Cian Monnin](https://github.com/CMonnin)
diff --git a/packages/plugin-text-to-speech-button-response/docs/text-to-speech-button-response.md b/packages/plugin-text-to-speech-button-response/docs/text-to-speech-button-response.md
new file mode 100644
index 00000000..54aeb77e
--- /dev/null
+++ b/packages/plugin-text-to-speech-button-response/docs/text-to-speech-button-response.md
@@ -0,0 +1,69 @@
+# text-to-speech-button-response
+
+Displays text, reads it aloud to the participant using the SpeechSynthesis API, and collects responses via buttons.
+
+## Parameters
+
+In addition to the [parameters available in all plugins](https://jspsych.org/latest/overview/plugins.md#parameters-available-in-all-plugins), this plugin accepts the following parameters. Parameters with a default value of undefined must be specified. Other parameters can be left unspecified if the default value is acceptable.
+
+| Parameter | Type | Default Value | Description |
+| ------------------- | -------------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
+| stimulus | STRING | undefined | The text to be displayed and converted into speech. |
+| choices | STRING (array) | undefined | Labels for the buttons. Each string in the array generates a different button. |
+| lang | STRING | "en-US" | The language of the voice for the speechSynthesis API. Falls back to 'en-US'. Depends on available system/browser voices. |
+| button_html         | FUNCTION       | ``(choice) => `<button class="jspsych-btn">${choice}</button>` `` | Function that generates HTML for each button from its choice label.                                                        |
+| prompt | HTML_STRING | null | HTML content displayed below the stimulus, typically a reminder of the action to take. |
+| stimulus_duration | INT | null | Time in milliseconds to display the stimulus. If null, the stimulus remains visible until the trial ends. |
+| trial_duration | INT | null | Time limit in milliseconds for the participant to respond. If null, the trial waits indefinitely for a response. |
+| button_layout | STRING | "grid" | Layout for buttons. 'grid' makes the container a grid, 'flex' makes it a flexbox. |
+| grid_rows | INT | 1 | Number of rows in the button grid. Applicable when `button_layout` is set to 'grid'. |
+| grid_columns | INT | null | Number of columns in the button grid. Applicable when `button_layout` is set to 'grid'. |
+| response_ends_trial | BOOL | true | If true, the trial ends when the participant responds. If false, the trial continues until trial_duration is reached. |
+| enable_button_after | INT            | 0                                                               | Delay in milliseconds before enabling the buttons.                                                                         |
+| time_between_words  | INT            | 0                                                               | Pause in milliseconds inserted between spoken words.                                                                       |
+
+## Data Generated
+
+In addition to the [default data collected by all plugins](https://jspsych.org/latest/overview/plugins.md#data-collected-by-all-plugins), this plugin collects the following data for each trial.
+
+| Name | Type | Value |
+| -------- | ------ | ---------------------------------------------------------------------------------------------- |
+| rt | INT | The response time in milliseconds. |
+| response | INT | Indicates which button the participant pressed. 0 for the first button, 1 for the second, etc. |
+| stimulus | STRING | The string content that was displayed on the screen. |
+
+## Install
+
+Using the CDN-hosted JavaScript file:
+
+```js
+<script src="https://unpkg.com/@jspsych-contrib/plugin-text-to-speech-button-response"></script>
+```
+
+Using the JavaScript file downloaded from a GitHub release dist archive:
+
+```js
+<script src="jspsych/plugin-text-to-speech-button-response.js"></script>
+```
+
+Using NPM:
+
+```
+npm install @jspsych-contrib/plugin-text-to-speech-button-response
+```
+
+```js
+import TextToSpeechButtonResponse from '@jspsych-contrib/plugin-text-to-speech-button-response';
+```
+
+## Examples
+
+### Setting the SpeechSynthesis voice to French
+
+```javascript
+const trial = {
+  type: TextToSpeechButtonResponse,
+  stimulus: 'This is a string',
+  choices: ['Button A', 'Button B'],
+  lang: 'fr-FR',
+};
+```
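+
+### Delaying the response buttons
+
+A sketch of a trial that keeps the buttons disabled for the first second and adds a short pause between spoken words (the specific values here are illustrative):
+
+```javascript
+const trial = {
+  type: TextToSpeechButtonResponse,
+  stimulus: 'Press a button when you are ready',
+  choices: ['Ready'],
+  // buttons stay disabled for the first 1000 ms
+  enable_button_after: 1000,
+  // 200 ms pause between spoken words
+  time_between_words: 200,
+};
+```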
diff --git a/packages/plugin-text-to-speech-button-response/examples/index.html b/packages/plugin-text-to-speech-button-response/examples/index.html
new file mode 100644
index 00000000..369c0b4f
--- /dev/null
+++ b/packages/plugin-text-to-speech-button-response/examples/index.html
diff --git a/packages/plugin-text-to-speech-button-response/src/index.spec.ts b/packages/plugin-text-to-speech-button-response/src/index.spec.ts
new file mode 100644
--- /dev/null
+++ b/packages/plugin-text-to-speech-button-response/src/index.spec.ts
+ await clickTarget(displayElement.querySelector('[data-choice="0"]'));
+ expect(
+ displayElement.querySelector("#jspsych-text-to-speech-button-response-stimulus").classList
+ ).toContain("responded");
+ });
+
+ test("buttons should be disabled first and then enabled after enable_button_after is set", async () => {
+ const { getHTML } = await startTimeline([
+ {
+ type: TextToSpeechButtonResponse,
+ stimulus: "this is a string",
+ choices: ["button_choice"],
+ enable_button_after: 500,
+ },
+ ]);
+
+    const btns = document.querySelectorAll("#jspsych-text-to-speech-btngroup [data-choice]");
+
+ for (let i = 0; i < btns.length; i++) {
+ expect(btns[i].getAttribute("disabled")).toBe("disabled");
+ }
+
+ jest.advanceTimersByTime(500);
+
+ for (let i = 0; i < btns.length; i++) {
+ expect(btns[i].hasAttribute("disabled")).toBe(false);
+ }
+ });
+});
+
+describe("html-button-response simulation", () => {
+ test("data mode works", async () => {
+ const ENABLE_BUTTON_AFTER = 2000;
+
+ const timeline = [
+ {
+ type: TextToSpeechButtonResponse,
+ stimulus: "foo",
+ choices: ["a", "b", "c"],
+ enable_button_after: ENABLE_BUTTON_AFTER,
+ },
+ ];
+
+ const { expectFinished, getData } = await simulateTimeline(timeline);
+
+ await expectFinished();
+
+ const response = getData().values()[0].response;
+
+ expect(getData().values()[0].rt).toBeGreaterThan(ENABLE_BUTTON_AFTER);
+ expect(response).toBeGreaterThanOrEqual(0);
+ expect(response).toBeLessThanOrEqual(2);
+ });
+
+ test("visual mode works", async () => {
+ const ENABLE_BUTTON_AFTER = 2000;
+
+ const timeline = [
+ {
+ type: TextToSpeechButtonResponse,
+ stimulus: "foo",
+ choices: ["a", "b", "c"],
+ enable_button_after: ENABLE_BUTTON_AFTER,
+ },
+ ];
+
+ const { expectFinished, expectRunning, getHTML, getData } = await simulateTimeline(
+ timeline,
+ "visual"
+ );
+
+ await expectRunning();
+
+ expect(getHTML()).toContain("foo");
+
+ jest.runAllTimers();
+
+ await expectFinished();
+
+ const response = getData().values()[0].response;
+
+ expect(getData().values()[0].rt).toBeGreaterThan(ENABLE_BUTTON_AFTER);
+ expect(response).toBeGreaterThanOrEqual(0);
+ expect(response).toBeLessThanOrEqual(2);
+ });
+});
diff --git a/packages/plugin-text-to-speech-button-response/src/index.ts b/packages/plugin-text-to-speech-button-response/src/index.ts
new file mode 100644
index 00000000..d5b362ec
--- /dev/null
+++ b/packages/plugin-text-to-speech-button-response/src/index.ts
@@ -0,0 +1,316 @@
+import { JsPsych, JsPsychPlugin, ParameterType, TrialType } from "jspsych";
+
+import { version } from "../package.json";
+
+const info = {
+ name: "text-to-speech-button-response",
+ version: version,
+ parameters: {
+ /** The text to be displayed and converted into speech. */
+ stimulus: {
+ type: ParameterType.STRING,
+ default: undefined,
+ },
+ /** Labels for the buttons. Each different string in the array will generate a different button. */
+ choices: {
+ type: ParameterType.STRING,
+ default: undefined,
+ array: true,
+ },
+ /**
+     * Sets the language of the voice used by the speechSynthesis API. Falls back to 'en-US'.
+     * Available voices depend on the system and browser: some browsers bundle voices
+     * (e.g. Google Chrome ships 'en-US', 'en-GB', 'fr-FR', 'de-DE', etc.),
+     * while Firefox relies entirely on the voices installed on the operating system.
+ */
+ lang: {
+ type: ParameterType.STRING,
+ default: "en-US",
+ },
+ /**
+ * A function that generates the HTML for each button in the `choices` array. The function gets the string of the item in the `choices` array and should return valid HTML. If you want to use different markup for each button, you can do that by using a conditional on either parameter. The default parameter returns a button element with the text label of the choice.
+ */
+ button_html: {
+ type: ParameterType.FUNCTION,
+ default: function (choice: string) {
+      return `<button class="jspsych-btn">${choice}</button>`;
+ },
+ },
+ /** This string can contain HTML markup. Any content here will be displayed below the stimulus. The intention is that it can be used to provide a reminder about the action the participant is supposed to take (e.g., which key to press). */
+ prompt: {
+ type: ParameterType.HTML_STRING,
+ default: null,
+ },
+ /** How long to display the stimulus in milliseconds. The visibility CSS property of the stimulus will be set to `hidden` after this time has elapsed. If this is null, then the stimulus will remain visible until the trial ends. */
+ stimulus_duration: {
+ type: ParameterType.INT,
+ default: null,
+ },
+ /** How long to wait for the participant to make a response before ending the trial in milliseconds. If the participant fails to make a response before this timer is reached, the participant's response will be recorded as null for the trial and the trial will end. If the value of this parameter is null, the trial will wait for a response indefinitely. */
+ trial_duration: {
+ type: ParameterType.INT,
+ default: null,
+ },
+ /** Setting to `'grid'` will make the container element have the CSS property `display: grid` and enable the use of `grid_rows` and `grid_columns`. Setting to `'flex'` will make the container element have the CSS property `display: flex`. You can customize how the buttons are laid out by adding inline CSS in the `button_html` parameter. */
+ button_layout: {
+ type: ParameterType.STRING,
+ default: "grid",
+ },
+ /**
+ * The number of rows in the button grid. Only applicable when `button_layout` is set to `'grid'`. If null, the number of rows will be determined automatically based on the number of buttons and the number of columns.
+ */
+ grid_rows: {
+ type: ParameterType.INT,
+ default: 1,
+ },
+ /**
+ * The number of columns in the button grid. Only applicable when `button_layout` is set to `'grid'`. If null, the number of columns will be determined automatically based on the number of buttons and the number of rows.
+ */
+ grid_columns: {
+ type: ParameterType.INT,
+ default: null,
+ },
+ /** If true, then the trial will end whenever the participant makes a response (assuming they make their response before the cutoff specified by the `trial_duration` parameter). If false, then the trial will continue until the value for `trial_duration` is reached. You can set this parameter to `false` to force the participant to view a stimulus for a fixed amount of time, even if they respond before the time is complete. */
+ response_ends_trial: {
+ type: ParameterType.BOOL,
+ default: true,
+ },
+    /** Delay in milliseconds before the buttons are enabled. */
+ enable_button_after: {
+ type: ParameterType.INT,
+ default: 0,
+ },
+ /** A pause between words in milliseconds */
+ time_between_words: {
+ type: ParameterType.INT,
+ default: 0,
+ },
+ },
+ data: {
+ /** The response time in milliseconds for the participant to make a response. The time is measured from when the stimulus first appears on the screen until the participant's response. */
+ rt: {
+ type: ParameterType.INT,
+ },
+ /** Indicates which button the participant pressed. The first button in the `choices` array is 0, the second is 1, and so on. */
+ response: {
+ type: ParameterType.INT,
+ },
+ /** The string that was displayed on the screen. */
+ stimulus: {
+ type: ParameterType.STRING,
+ },
+ },
+};
+
+type Info = typeof info;
+/**
+ * **text-to-speech-button-response**
+ *
+ * Displays text, reads to participant using SpeechSynthesis, has buttons for responses.
+ *
+ * @author Cian Monnin
+ * @see {@link https://github.com/jspsych/jspsych-contrib/blob/main/packages/plugin-text-to-speech-button-response/README.md}
+ */
+class TextToSpeechButtonPluginResponse implements JsPsychPlugin<Info> {
+ static info = info;
+
+  constructor(private jsPsych: JsPsych) {}
+
+  trial(display_element: HTMLElement, trial: TrialType<Info>) {
+ // Display stimulus
+ const stimulusElement = document.createElement("div");
+ stimulusElement.id = "jspsych-text-to-speech-button-response-stimulus";
+ stimulusElement.innerHTML = trial.stimulus;
+ display_element.appendChild(stimulusElement);
+
+ // Display buttons
+ const buttonGroupElement = document.createElement("div");
+ buttonGroupElement.id = "jspsych-text-to-speech-btngroup";
+ if (trial.button_layout === "grid") {
+ buttonGroupElement.classList.add("jspsych-btn-group-grid");
+ if (trial.grid_rows === null && trial.grid_columns === null) {
+ throw new Error(
+ "You cannot set `grid_rows` to `null` without providing a value for `grid_columns`."
+ );
+ }
+ const n_cols =
+ trial.grid_columns === null
+ ? Math.ceil(trial.choices.length / trial.grid_rows)
+ : trial.grid_columns;
+ const n_rows =
+ trial.grid_rows === null
+ ? Math.ceil(trial.choices.length / trial.grid_columns)
+ : trial.grid_rows;
+ buttonGroupElement.style.gridTemplateColumns = `repeat(${n_cols}, 1fr)`;
+ buttonGroupElement.style.gridTemplateRows = `repeat(${n_rows}, 1fr)`;
+ } else if (trial.button_layout === "flex") {
+ buttonGroupElement.classList.add("jspsych-btn-group-flex");
+ }
+
+ for (const [choiceIndex, choice] of trial.choices.entries()) {
+ buttonGroupElement.insertAdjacentHTML("beforeend", trial.button_html(choice, choiceIndex));
+ const buttonElement = buttonGroupElement.lastChild as HTMLElement;
+ buttonElement.dataset.choice = choiceIndex.toString();
+ buttonElement.addEventListener("click", () => {
+ after_response(choiceIndex);
+ });
+ }
+
+ display_element.appendChild(buttonGroupElement);
+
+ // Show prompt if there is one
+ if (trial.prompt !== null) {
+ display_element.insertAdjacentHTML("beforeend", trial.prompt);
+ }
+
+    // Set up SpeechSynthesis
+ const words = trial.stimulus.split(" ");
+ let currentIndex = 0;
+
+ // start time
+ const start_time = performance.now();
+
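+    // Speak the stimulus one word at a time: each utterance's onend callback
+    // schedules the next word after `time_between_words` milliseconds.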
+ function speakNextWord() {
+ if (currentIndex < words.length) {
+ const utterance = new SpeechSynthesisUtterance(words[currentIndex]);
+ utterance.lang = trial.lang;
+
+ utterance.onend = () => {
+ setTimeout(() => {
+ currentIndex++;
+ speakNextWord();
+ }, trial.time_between_words);
+ };
+ speechSynthesis.speak(utterance);
+ }
+ }
+ speakNextWord();
+
+ // store response
+ const response = {
+ rt: null,
+ button: null,
+ };
+
+ const end_trial = () => {
+ const trial_data = {
+ rt: response.rt,
+ stimulus: trial.stimulus,
+ response: response.button,
+ };
+
+ // move on to the next trial
+ this.jsPsych.finishTrial(trial_data);
+ };
+
+ // function to handle responses by the subject
+ function after_response(choice) {
+ // measure rt
+ const end_time = performance.now();
+ const rt = Math.round(end_time - start_time);
+ response.button = parseInt(choice);
+ response.rt = rt;
+
+ // after a valid response, the stimulus will have the CSS class 'responded'
+ // which can be used to provide visual feedback that a response was recorded
+ stimulusElement.classList.add("responded");
+
+ // disable all the buttons after a response
+ for (const button of buttonGroupElement.children) {
+ button.setAttribute("disabled", "disabled");
+ }
+
+ if (trial.response_ends_trial) {
+ end_trial();
+ }
+ }
+
+    // hide stimulus if stimulus_duration is set
+ if (trial.stimulus_duration !== null) {
+ this.jsPsych.pluginAPI.setTimeout(() => {
+ stimulusElement.style.visibility = "hidden";
+ }, trial.stimulus_duration);
+ }
+
+ // disable all the buttons and set a timeout that enables them after a specified delay if timing is set
+    if (trial.enable_button_after > 0) {
+      const btns = buttonGroupElement.querySelectorAll("[data-choice]");
+      for (const btn of btns) {
+        btn.setAttribute("disabled", "disabled");
+      }
+      this.jsPsych.pluginAPI.setTimeout(() => {
+        for (const btn of btns) {
+          btn.removeAttribute("disabled");
+        }
+      }, trial.enable_button_after);
+    }
+
+ // end trial if time limit is set
+ if (trial.trial_duration !== null) {
+ this.jsPsych.pluginAPI.setTimeout(end_trial, trial.trial_duration);
+ }
+ }
+
+ simulate(
+    trial: TrialType<Info>,
+ simulation_mode,
+ simulation_options: any,
+ load_callback: () => void
+ ) {
+ if (simulation_mode == "data-only") {
+ load_callback();
+ this.simulate_data_only(trial, simulation_options);
+ }
+ if (simulation_mode == "visual") {
+ this.simulate_visual(trial, simulation_options, load_callback);
+ }
+ }
+
+  private create_simulation_data(trial: TrialType<Info>, simulation_options) {
+ const default_data = {
+ stimulus: trial.stimulus,
+ rt:
+ this.jsPsych.randomization.sampleExGaussian(500, 50, 1 / 150, true) +
+ trial.enable_button_after,
+ response: this.jsPsych.randomization.randomInt(0, trial.choices.length - 1),
+ };
+
+ const data = this.jsPsych.pluginAPI.mergeSimulationData(default_data, simulation_options);
+
+ this.jsPsych.pluginAPI.ensureSimulationDataConsistency(trial, data);
+
+ return data;
+ }
+
+  private simulate_data_only(trial: TrialType<Info>, simulation_options) {
+ const data = this.create_simulation_data(trial, simulation_options);
+
+ this.jsPsych.finishTrial(data);
+ }
+
+  private simulate_visual(trial: TrialType<Info>, simulation_options, load_callback: () => void) {
+ const data = this.create_simulation_data(trial, simulation_options);
+
+ const display_element = this.jsPsych.getDisplayElement();
+
+ this.trial(display_element, trial);
+ load_callback();
+
+ if (data.rt !== null) {
+ this.jsPsych.pluginAPI.clickTarget(
+ display_element.querySelector(
+ `#jspsych-text-to-speech-btngroup [data-choice="${data.response}"]`
+ ),
+ data.rt
+ );
+ }
+ }
+}
+
+export default TextToSpeechButtonPluginResponse;
diff --git a/packages/plugin-text-to-speech-button-response/tsconfig.json b/packages/plugin-text-to-speech-button-response/tsconfig.json
new file mode 100644
index 00000000..8a845081
--- /dev/null
+++ b/packages/plugin-text-to-speech-button-response/tsconfig.json
@@ -0,0 +1,8 @@
+{
+ "extends": "@jspsych/config/tsconfig.contrib.json",
+ "compilerOptions": {
+ "baseUrl": ".",
+ "resolveJsonModule": true
+ },
+ "include": ["src"]
+}
diff --git a/packages/plugin-text-to-speech-keyboard-response/README.md b/packages/plugin-text-to-speech-keyboard-response/README.md
new file mode 100644
index 00000000..fe19045a
--- /dev/null
+++ b/packages/plugin-text-to-speech-keyboard-response/README.md
@@ -0,0 +1,35 @@
+# text-to-speech-keyboard-response
+
+## Overview
+
+Displays text, reads it aloud to the participant using the SpeechSynthesis API, and takes keyboard presses for responses.
+
+## Loading
+
+### In browser
+
+```js
+<script src="https://unpkg.com/@jspsych-contrib/plugin-text-to-speech-keyboard-response"></script>
+```
+
+### Via NPM
+
+```
+npm install @jspsych-contrib/plugin-text-to-speech-keyboard-response
+```
+
+```js
+import jsPsychTextToSpeechKeyboardResponse from '@jspsych-contrib/plugin-text-to-speech-keyboard-response';
+```
+
+## Compatibility
+
+jsPsych 7.0.0 or later
+
+## Documentation
+
+See [documentation](https://github.com/jspsych/jspsych-contrib/blob/main/packages/plugin-text-to-speech-keyboard-response/docs/text-to-speech-keyboard-response.md)
+
+## Author / Citation
+
+[Cian Monnin](https://github.com/CMonnin)
diff --git a/packages/plugin-text-to-speech-keyboard-response/docs/text-to-speech-keyboard-response.md b/packages/plugin-text-to-speech-keyboard-response/docs/text-to-speech-keyboard-response.md
new file mode 100644
index 00000000..fec09f0b
--- /dev/null
+++ b/packages/plugin-text-to-speech-keyboard-response/docs/text-to-speech-keyboard-response.md
@@ -0,0 +1,64 @@
+# text-to-speech-keyboard-response
+
+Displays text, reads it aloud to the participant using the SpeechSynthesis API, and takes keyboard presses for responses.
+
+## Parameters
+
+In addition to the [parameters available in all plugins](https://jspsych.org/latest/overview/plugins.md#parameters-available-in-all-plugins), this plugin accepts the following parameters. Parameters with a default value of undefined must be specified. Other parameters can be left unspecified if the default value is acceptable.
+
+| Parameter | Type | Default Value | Description |
+| ------------------- | ----------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
+| stimulus | STRING | undefined | The string to be displayed. |
+| choices | KEYS | "ALL_KEYS" | Array of keys the participant is allowed to press. Default is "ALL_KEYS" meaning all keys are valid responses. "NO_KEYS" means no responses. |
+| lang | STRING | "en-US" | The language of the voice for the speechSynthesis API. Falls back to 'en-US' if unavailable. Depends on the system/browser voices. |
+| prompt | HTML_STRING | null | HTML content displayed below the stimulus, typically a reminder of the action to take. |
+| stimulus_duration | INT | null | Time in milliseconds to display the stimulus. If null, the stimulus remains visible until the trial ends. |
+| trial_duration | INT | null | Time limit in milliseconds for the participant to respond. If null, the trial waits indefinitely for a response. |
+| response_ends_trial | BOOL        | true          | If true, the trial ends when the participant makes a response. If false, the trial continues until trial_duration is reached.                 |
+| time_between_words  | INT         | 0             | Pause in milliseconds inserted between spoken words.                                                                                           |
+
+## Data Generated
+
+In addition to the [default data collected by all plugins](https://jspsych.org/latest/overview/plugins.md#data-collected-by-all-plugins), this plugin collects the following data for each trial.
+
+| Name | Type | Value |
+| -------- | ------ | -------------------------------------------- |
+| response | STRING | The key the participant pressed. |
+| rt | INT | The response time in milliseconds. |
+| stimulus | STRING | The string that was displayed on the screen. |
+
+## Install
+
+Using the CDN-hosted JavaScript file:
+
+```js
+<script src="https://unpkg.com/@jspsych-contrib/plugin-text-to-speech-keyboard-response"></script>
+```
+
+Using the JavaScript file downloaded from a GitHub release dist archive:
+
+```js
+<script src="jspsych/plugin-text-to-speech-keyboard-response.js"></script>
+```
+
+Using NPM:
+
+```
+npm install @jspsych-contrib/plugin-text-to-speech-keyboard-response
+```
+
+```js
+import TextToSpeechKeyboardResponse from '@jspsych-contrib/plugin-text-to-speech-keyboard-response';
+```
+
+## Examples
+
+### Setting the SpeechSynthesis voice to French
+
+```javascript
+const trial = {
+  type: TextToSpeechKeyboardResponse,
+  stimulus: 'This is a string',
+  prompt: 'Press any key to continue',
+  lang: 'fr-FR',
+};
+```
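+
+### Restricting valid keys and adding a time limit
+
+A sketch of a trial that only accepts the 'f' and 'j' keys and ends automatically after 5 seconds if no response is made (the specific values here are illustrative):
+
+```javascript
+const trial = {
+  type: TextToSpeechKeyboardResponse,
+  stimulus: 'Press f or j',
+  choices: ['f', 'j'],
+  trial_duration: 5000,
+};
+```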
diff --git a/packages/plugin-text-to-speech-keyboard-response/examples/index.html b/packages/plugin-text-to-speech-keyboard-response/examples/index.html
new file mode 100644
index 00000000..95bc7819
--- /dev/null
+++ b/packages/plugin-text-to-speech-keyboard-response/examples/index.html
diff --git a/packages/plugin-text-to-speech-keyboard-response/jest.config.cjs b/packages/plugin-text-to-speech-keyboard-response/jest.config.cjs
new file mode 100644
index 00000000..6ac19d5c
--- /dev/null
+++ b/packages/plugin-text-to-speech-keyboard-response/jest.config.cjs
@@ -0,0 +1 @@
+module.exports = require("@jspsych/config/jest").makePackageConfig(__dirname);
diff --git a/packages/plugin-text-to-speech-keyboard-response/package.json b/packages/plugin-text-to-speech-keyboard-response/package.json
new file mode 100644
index 00000000..f1e5c653
--- /dev/null
+++ b/packages/plugin-text-to-speech-keyboard-response/package.json
@@ -0,0 +1,47 @@
+{
+ "name": "@jspsych-contrib/plugin-text-to-speech-keyboard-response",
+ "version": "0.0.1",
+ "description": "Displays text, reads to the participant using SpeechSythesis, takes keybaoard presses for responses",
+ "type": "module",
+ "main": "dist/index.cjs",
+ "exports": {
+ "import": "./dist/index.js",
+ "require": "./dist/index.cjs"
+ },
+ "typings": "dist/index.d.ts",
+ "unpkg": "dist/index.browser.min.js",
+ "files": [
+ "src",
+ "dist"
+ ],
+ "source": "src/index.ts",
+ "scripts": {
+ "test": "jest",
+ "test:watch": "npm test -- --watch",
+ "tsc": "tsc",
+ "build": "rollup --config",
+ "build:watch": "npm run build -- --watch"
+ },
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/jspsych/jspsych-contrib.git",
+ "directory": "packages/plugin-text-to-speech-keyboard-response"
+ },
+ "author": {
+ "name": "Cian Monnin",
+ "url": "https://github.com/CMonnin"
+ },
+ "license": "MIT",
+ "bugs": {
+ "url": "https://github.com/jspsych/jspsych-contrib/issues"
+ },
+ "homepage": "https://github.com/jspsych/jspsych-contrib/tree/main/packages/plugin-text-to-speech-keyboard-response",
+ "peerDependencies": {
+ "jspsych": ">=7.0.0"
+ },
+ "devDependencies": {
+ "@jspsych/config": "^2.0.0",
+ "@jspsych/test-utils": "^1.0.0",
+ "jspsych": "^7.0.0"
+ }
+}
diff --git a/packages/plugin-text-to-speech-keyboard-response/rollup.config.mjs b/packages/plugin-text-to-speech-keyboard-response/rollup.config.mjs
new file mode 100644
index 00000000..14ee6b91
--- /dev/null
+++ b/packages/plugin-text-to-speech-keyboard-response/rollup.config.mjs
@@ -0,0 +1,3 @@
+import { makeRollupConfig } from "@jspsych/config/rollup";
+
+export default makeRollupConfig("jsPsychTextToSpeechKeyboardResponse");
diff --git a/packages/plugin-text-to-speech-keyboard-response/src/index.spec.ts b/packages/plugin-text-to-speech-keyboard-response/src/index.spec.ts
new file mode 100644
index 00000000..8c577c7f
--- /dev/null
+++ b/packages/plugin-text-to-speech-keyboard-response/src/index.spec.ts
@@ -0,0 +1,215 @@
+import { pressKey, simulateTimeline, startTimeline } from "@jspsych/test-utils";
+
+import TextToSpeechKeyboardResponse from ".";
+
+// minimal mock for SpeechSynthesisUtterance
+(global as any).SpeechSynthesisUtterance = class {
+ text: string;
+ lang: string;
+ constructor(text: string) {
+ this.text = text;
+ this.lang = "en-US";
+ }
+
+ speak() {}
+};
+// minimal mock function for speechSynthesis
+(global as any).speechSynthesis = {
+ speak: jest.fn(),
+};
+
+jest.useFakeTimers();
+
+describe("text-to-speech-keyboard-response", () => {
+ it("should call speech synthesis when trial is executed", async () => {
+ const {} = await startTimeline([
+ {
+ stimulus: "this is a string",
+ choices: ["button_choice"],
+ type: TextToSpeechKeyboardResponse,
+ },
+ ]);
+ expect((global as any).speechSynthesis.speak).toHaveBeenCalled();
+ });
+ test("displays string stimulus", async () => {
+ const { getHTML, expectFinished } = await startTimeline([
+ {
+ type: TextToSpeechKeyboardResponse,
+ stimulus: "this is a string",
+ },
+ ]);
+
+    expect(getHTML()).toBe(
+      '<div id="jspsych-text-to-speech-keyboard-response-stimulus">this is a string</div>'
+    );
+ await pressKey("a");
+ await expectFinished();
+ });
+
+  /* There are issues with this test; they seem related to the clickTarget function.
+  Tried awaiting a new Promise with setTimeout
+  after setting jest.useRealTimers() and then setting jest.useFakeTimers(). */
+ /* test("display clears after key press", async () => {
+ const { getHTML, expectFinished } = await startTimeline([
+ {
+ type: TextToSpeechKeyboardResponse,
+ stimulus: "this is a string",
+ choices: ["f", "j"],
+ },
+ ]);
+    expect(getHTML()).toContain(
+      '<div id="jspsych-text-to-speech-keyboard-response-stimulus">this is a string</div>'
+    );
+
+ await pressKey("f");
+
+ expect(
+ document.querySelector("#jspsych-text-to-speech-keyboard-response-stimulus").className
+ ).toBe(" responded");
+
+ await expectRunning();
+  }); */
+});
+
+describe("text-to-speech-keyboard-response simulation", () => {
+ test("data mode works", async () => {
+ const timeline = [
+ {
+ type: TextToSpeechKeyboardResponse,
+ stimulus: "foo",
+ },
+ ];
+
+ const { expectFinished, getData } = await simulateTimeline(timeline);
+
+ await expectFinished();
+
+ expect(getData().values()[0].rt).toBeGreaterThan(0);
+ expect(typeof getData().values()[0].response).toBe("string");
+ });
+
+ test("visual mode works", async () => {
+ const timeline = [
+ {
+ type: TextToSpeechKeyboardResponse,
+ stimulus: "foo",
+ },
+ ];
+
+ const { expectFinished, expectRunning, getHTML, getData } = await simulateTimeline(
+ timeline,
+ "visual"
+ );
+
+ await expectRunning();
+
+ expect(getHTML()).toContain("foo");
+
+ jest.runAllTimers();
+
+ await expectFinished();
+
+ expect(getData().values()[0].rt).toBeGreaterThan(0);
+ expect(typeof getData().values()[0].response).toBe("string");
+ });
+});
diff --git a/packages/plugin-text-to-speech-keyboard-response/src/index.ts b/packages/plugin-text-to-speech-keyboard-response/src/index.ts
new file mode 100644
index 00000000..cc5066f6
--- /dev/null
+++ b/packages/plugin-text-to-speech-keyboard-response/src/index.ts
@@ -0,0 +1,264 @@
+import { JsPsych, JsPsychPlugin, ParameterType, TrialType } from "jspsych";
+
+import { version } from "../package.json";
+
+const info = {
+ name: "text-to-speech-keyboard-response",
+ version: version,
+ parameters: {
+ /**
+ * The string to be displayed.
+ */
+ stimulus: {
+ type: ParameterType.STRING,
+ default: undefined,
+ },
+ /**
+ * This array contains the key(s) that the participant is allowed to press in order to respond
+ * to the stimulus. Keys should be specified as characters (e.g., `'a'`, `'q'`, `' '`, `'Enter'`, `'ArrowDown'`) - see
+ * {@link https://developer.mozilla.org/en-US/docs/Web/API/UI_Events/Keyboard_event_key_values this page}
+ * and
+ * {@link https://www.freecodecamp.org/news/javascript-keycode-list-keypress-event-key-codes/ this page (event.key column)}
+ * for more examples. Any key presses that are not listed in the
+ * array will be ignored. The default value of `"ALL_KEYS"` means that all keys will be accepted as valid responses.
+ * Specifying `"NO_KEYS"` will mean that no responses are allowed.
+ */
+ choices: {
+ type: ParameterType.KEYS,
+ default: "ALL_KEYS",
+ },
+ /**
+     * Sets the language of the voice used by the speechSynthesis API. Falls back to 'en-US'.
+     * Available voices depend on the system and browser: some browsers bundle voices
+     * (e.g. Google Chrome ships 'en-US', 'en-GB', 'fr-FR', 'de-DE', etc.),
+     * while Firefox relies entirely on the voices installed on the operating system.
+ */
+ lang: {
+ type: ParameterType.STRING,
+ default: "en-US",
+ },
+ /**
+ * This string can contain HTML markup. Any content here will be displayed below the stimulus.
+ * The intention is that it can be used to provide a reminder about the action the participant
+ * is supposed to take (e.g., which key to press).
+ */
+ prompt: {
+ type: ParameterType.HTML_STRING,
+ default: null,
+ },
+ /**
+ * How long to display the stimulus in milliseconds. The visibility CSS property of the stimulus
+ * will be set to `hidden` after this time has elapsed. If this is null, then the stimulus will
+ * remain visible until the trial ends.
+ */
+ stimulus_duration: {
+ type: ParameterType.INT,
+ default: null,
+ },
+ /**
+ * How long to wait for the participant to make a response before ending the trial in milliseconds.
+ * If the participant fails to make a response before this timer is reached, the participant's response
+ * will be recorded as null for the trial and the trial will end. If the value of this parameter is null,
+ * then the trial will wait for a response indefinitely.
+ */
+ trial_duration: {
+ type: ParameterType.INT,
+ default: null,
+ },
+ /**
+ * If true, then the trial will end whenever the participant makes a response (assuming they make their
+ * response before the cutoff specified by the trial_duration parameter). If false, then the trial will
+ * continue until the value for trial_duration is reached. You can set this parameter to false to force
+ * the participant to view a stimulus for a fixed amount of time, even if they respond before the time is complete.
+ */
+ response_ends_trial: {
+ type: ParameterType.BOOL,
+ default: true,
+ },
+ /** A pause between words in milliseconds */
+ time_between_words: {
+ type: ParameterType.INT,
+ default: 0,
+ },
+ },
+ data: {
+ /** Indicates which key the participant pressed. */
+ response: {
+ type: ParameterType.STRING,
+ },
+ /** The response time in milliseconds for the participant to make a response. The time is measured from when the stimulus first appears on the screen until the participant's response. */
+ rt: {
+ type: ParameterType.INT,
+ },
+    /** The string that was displayed on the screen. */
+ stimulus: {
+ type: ParameterType.STRING,
+ },
+ },
+};
+
+type Info = typeof info;
+
+/**
+ * **text-to-speech-keyboard-response**
+ *
+ * Displays text, reads it aloud to the participant using the SpeechSynthesis API, and takes keyboard presses for responses.
+ *
+ * @author Cian Monnin
+ * @see {@link https://github.com/jspsych/jspsych-contrib/blob/main/packages/plugin-text-to-speech-keyboard-response/README.md}
+ */
+class TextToSpeechKeyboardResponsePlugin implements JsPsychPlugin<Info> {
+ static info = info;
+ constructor(private jsPsych: JsPsych) {}
+
+  trial(display_element: HTMLElement, trial: TrialType<Info>) {
+ // Display stimulus
+ const stimulusElement = document.createElement("div");
+ stimulusElement.id = "jspsych-text-to-speech-keyboard-response-stimulus";
+ stimulusElement.innerHTML = trial.stimulus;
+ display_element.appendChild(stimulusElement);
+ // add prompt
+ if (trial.prompt !== null) {
+ display_element.insertAdjacentHTML("beforeend", trial.prompt);
+ }
+    // Set up SpeechSynthesis
+ const words = trial.stimulus.split(" ");
+ let currentIndex = 0;
+
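+    // Speak the stimulus one word at a time: each utterance's onend callback
+    // schedules the next word after `time_between_words` milliseconds.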
+ function speakNextWord() {
+ if (currentIndex < words.length) {
+ const utterance = new SpeechSynthesisUtterance(words[currentIndex]);
+ utterance.lang = trial.lang;
+
+ utterance.onend = () => {
+ setTimeout(() => {
+ currentIndex++;
+ speakNextWord();
+ }, trial.time_between_words);
+ };
+ speechSynthesis.speak(utterance);
+ }
+ }
+ speakNextWord();
+
+ // store response
+ var response = {
+ rt: null,
+ key: null,
+ };
+
+ // function to end trial when it is time
+ const end_trial = () => {
+ // kill keyboard listeners
+ if (typeof keyboardListener !== "undefined") {
+ this.jsPsych.pluginAPI.cancelKeyboardResponse(keyboardListener);
+ }
+
+ // gather the data to store for the trial
+ var trial_data = {
+ rt: response.rt,
+ stimulus: trial.stimulus,
+ response: response.key,
+ };
+
+ // move on to the next trial
+ this.jsPsych.finishTrial(trial_data);
+ };
+
+ // function to handle responses by the subject
+ var after_response = (info) => {
+ // after a valid response, the stimulus will have the CSS class 'responded'
+ // which can be used to provide visual feedback that a response was recorded
+ display_element.querySelector(
+ "#jspsych-text-to-speech-keyboard-response-stimulus"
+ ).className += " responded";
+
+ // only record the first response
+ if (response.key == null) {
+ response = info;
+ }
+
+ if (trial.response_ends_trial) {
+ end_trial();
+ }
+ };
+
+ // start the response listener
+ if (trial.choices != "NO_KEYS") {
+ var keyboardListener = this.jsPsych.pluginAPI.getKeyboardResponse({
+ callback_function: after_response,
+ valid_responses: trial.choices,
+ rt_method: "performance",
+ persist: false,
+ allow_held_key: false,
+ });
+ }
+
+ // hide stimulus if stimulus_duration is set
+ if (trial.stimulus_duration !== null) {
+ this.jsPsych.pluginAPI.setTimeout(() => {
+        display_element.querySelector<HTMLElement>(
+ "#jspsych-text-to-speech-keyboard-response-stimulus"
+ ).style.visibility = "hidden";
+ }, trial.stimulus_duration);
+ }
+
+ // end trial if trial_duration is set
+ if (trial.trial_duration !== null) {
+ this.jsPsych.pluginAPI.setTimeout(end_trial, trial.trial_duration);
+ }
+ }
+
+ simulate(
+    trial: TrialType<Info>,
+ simulation_mode,
+ simulation_options: any,
+ load_callback: () => void
+ ) {
+ if (simulation_mode == "data-only") {
+ load_callback();
+ this.simulate_data_only(trial, simulation_options);
+ }
+ if (simulation_mode == "visual") {
+ this.simulate_visual(trial, simulation_options, load_callback);
+ }
+ }
+
+  private create_simulation_data(trial: TrialType<Info>, simulation_options) {
+ const default_data = {
+ stimulus: trial.stimulus,
+ rt: this.jsPsych.randomization.sampleExGaussian(500, 50, 1 / 150, true),
+ response: this.jsPsych.pluginAPI.getValidKey(trial.choices),
+ };
+
+ const data = this.jsPsych.pluginAPI.mergeSimulationData(default_data, simulation_options);
+
+ this.jsPsych.pluginAPI.ensureSimulationDataConsistency(trial, data);
+
+ return data;
+ }
+
+  private simulate_data_only(trial: TrialType<Info>, simulation_options) {
+ const data = this.create_simulation_data(trial, simulation_options);
+
+ this.jsPsych.finishTrial(data);
+ }
+
+  private simulate_visual(trial: TrialType<Info>, simulation_options, load_callback: () => void) {
+ const data = this.create_simulation_data(trial, simulation_options);
+
+ const display_element = this.jsPsych.getDisplayElement();
+
+ this.trial(display_element, trial);
+ load_callback();
+
+ if (data.rt !== null) {
+ this.jsPsych.pluginAPI.pressKey(data.response, data.rt);
+ }
+ }
+}
+
+export default TextToSpeechKeyboardResponsePlugin;
diff --git a/packages/plugin-text-to-speech-keyboard-response/tsconfig.json b/packages/plugin-text-to-speech-keyboard-response/tsconfig.json
new file mode 100644
index 00000000..8a845081
--- /dev/null
+++ b/packages/plugin-text-to-speech-keyboard-response/tsconfig.json
@@ -0,0 +1,8 @@
+{
+ "extends": "@jspsych/config/tsconfig.contrib.json",
+ "compilerOptions": {
+ "baseUrl": ".",
+ "resolveJsonModule": true
+ },
+ "include": ["src"]
+}