diff --git a/README.md b/README.md
index 785ff5a..ca1f2fc 100644
--- a/README.md
+++ b/README.md
@@ -38,8 +38,9 @@ expo-speech-recognition implements the iOS [`SFSpeechRecognizer`](https://develo
 - [getSpeechRecognitionServices()](#getspeechrecognitionservices-string-android-only)
 - [getDefaultRecognitionService()](#getdefaultrecognitionservice--packagename-string--android-only)
 - [getAssistantService()](#getassistantservice--packagename-string--android-only)
+ - [isRecognitionAvailable()](#isrecognitionavailable-boolean)
 - [supportsOnDeviceRecognition()](#supportsondevicerecognition-boolean)
- - [supportsRecording()](#supportsrecording-boolean-android-only)
+ - [supportsRecording()](#supportsrecording-boolean)
 - [androidTriggerOfflineModelDownload()](#androidtriggerofflinemodeldownload-locale-string--promise-status-opened_dialog--download_success--download_canceled-message-string-)
 - [setCategoryIOS()](#setcategoryios-void-ios-only)
 - [getAudioSessionCategoryAndOptionsIOS()](#getaudiosessioncategoryandoptionsios-ios-only)
@@ -307,7 +308,7 @@ Events are largely based on the [Web Speech API](https://developer.mozilla.org/e
 | ------------- | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
 | `audiostart`  | Audio capturing has started                                                                  | Includes the `uri` if `recordingOptions.persist` is enabled.                                                                                                                                                                                                                             |
 | `audioend`    | Audio capturing has ended                                                                    | Includes the `uri` if `recordingOptions.persist` is enabled.                                                                                                                                                                                                                             |
-| `end`         | Speech recognition service has disconnected.                                                 | This should be the last event dispatched.                                                                                                                                                                                                                                                |
+| `end`         | Speech recognition service has disconnected.                                                 | This should always be the last event dispatched, including after errors.                                                                                                                                                                                                                 |
 | `error`       | Fired when a speech recognition error occurs.                                                | You'll also receive an `error` event (with code "aborted") when calling `.abort()`                                                                                                                                                                                                       |
 | `nomatch`     | Speech recognition service returns a final result with no significant recognition.           | You may have non-final results recognized. This may get emitted after cancellation.                                                                                                                                                                                                      |
 | `result`      | Speech recognition service returns a word or phrase that has been positively recognized.     | On Android, continuous mode runs as a segmented session, meaning when a final result is reached, additional partial and final results will cover a new segment separate from the previous final result. On iOS, you should expect one final result before speech recognition has stopped. |
@@ -361,7 +362,7 @@ The error code is based on the [Web Speech API error codes](https://developer.mo
 If you would like to persist the recognized audio for later use, you can enable the `recordingOptions.persist` option when calling `start()`. Enabling this setting will emit an `{ uri: string }` event object in the `audiostart` and `audioend` events with the local file path.
 
 > [!IMPORTANT]
-> This feature is available on Android 13+ and iOS. Call `supportsRecording()` to see if it's available before using this feature.
+> This feature is available on Android 13+ and iOS. Call [`supportsRecording()`](#supportsrecording-boolean) to see if it's available before using this feature.
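+
+For example, you can gate persisted recordings on platform support (a minimal sketch; the `lang` option shown here is illustrative):
+
+```ts
+import {
+  ExpoSpeechRecognitionModule,
+  addSpeechRecognitionListener,
+  supportsRecording,
+} from "expo-speech-recognition";
+
+if (supportsRecording()) {
+  addSpeechRecognitionListener("audiostart", (event) => {
+    // `event.uri` contains the local file path of the persisted audio
+    console.log("Recording started:", event.uri);
+  });
+  ExpoSpeechRecognitionModule.start({
+    lang: "en-US",
+    recordingOptions: { persist: true },
+  });
+}
+```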
 
 Default audio output formats:
 
@@ -838,9 +839,9 @@ Get list of speech recognition services available on the device.
 > This only includes services that are listed under `androidSpeechServicePackages` in your app.json as well as the core services listed under `forceQueryable` when running the command: `adb shell dumpsys package queries`
 
 ```ts
-import { ExpoSpeechRecognitionModule } from "expo-speech-recognition";
+import { getSpeechRecognitionServices } from "expo-speech-recognition";
 
-const packages = ExpoSpeechRecognitionModule.getSpeechRecognitionServices();
+const packages = getSpeechRecognitionServices();
 console.log("Speech recognition services:", packages.join(", "));
 // e.g. ["com.google.android.as", "com.google.android.tts", "com.samsung.android.bixby.agent"]
 ```
@@ -850,9 +851,9 @@ Returns the default voice recognition service on the device.
 
 ```ts
-import { ExpoSpeechRecognitionModule } from "expo-speech-recognition";
+import { getDefaultRecognitionService } from "expo-speech-recognition";
 
-const service = ExpoSpeechRecognitionModule.getDefaultRecognitionService();
+const service = getDefaultRecognitionService();
 console.log("Default recognition service:", service.packageName);
 // Usually this is "com.google.android.tts" on Android 13+ and "com.google.android.googlequicksearchbox" on Android <=12.
 // For on-device recognition, "com.google.android.as" will likely be used.
 ```
@@ -863,17 +864,32 @@ Returns the default voice assistant service on the device.
 
 ```ts
-import { ExpoSpeechRecognitionModule } from "expo-speech-recognition";
+import { getAssistantService } from "expo-speech-recognition";
 
-const service = ExpoSpeechRecognitionModule.getAssistantService();
+const service = getAssistantService();
 console.log("Default assistant service:", service.packageName);
 // Usually "com.google.android.googlequicksearchbox" for Google
 // or "com.samsung.android.bixby.agent" for Samsung
 ```
 
+### `isRecognitionAvailable(): boolean`
+
+Whether speech recognition is currently available on the device.
+
+If this method returns false, calling `start()` will fail and emit an error event with the code `service-not-allowed` or `language-not-supported`. In that case, ask the user to enable speech recognition in the system settings (i.e. on iOS, enable Siri & Dictation). On Android, ask the user to install and enable `com.google.android.tts` (Android 13+) or `com.google.android.googlequicksearchbox` (Android <= 12) as the default voice recognition service.
+
+On the web, this method only checks whether the browser exposes the Web `SpeechRecognition` API; keep in mind that some browsers (such as Brave) may expose the API without actually implementing it. Refer to the [Platform Compatibility Table](#platform-compatibility-table) for more information. You may want to use a user agent parser to fill in the gaps.
+
+```ts
+import { isRecognitionAvailable } from "expo-speech-recognition";
+
+const available = isRecognitionAvailable();
+console.log("Speech recognition available:", available);
+```
+
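+For example, you can check availability before starting a session (a minimal sketch; the `lang` option and the exact error payload fields are illustrative):
+
+```ts
+import {
+  ExpoSpeechRecognitionModule,
+  addSpeechRecognitionListener,
+  isRecognitionAvailable,
+} from "expo-speech-recognition";
+
+function startListening() {
+  if (!isRecognitionAvailable()) {
+    // Prompt the user to enable Siri & Dictation (iOS) or install a
+    // recognition service (Android) before retrying
+    return;
+  }
+  addSpeechRecognitionListener("error", (event) => {
+    // e.g. "service-not-allowed" or "language-not-supported"
+    console.warn("Recognition error:", event.error, event.message);
+  });
+  ExpoSpeechRecognitionModule.start({ lang: "en-US" });
+}
+```
+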
 ### `supportsOnDeviceRecognition(): boolean`
 
-Whether on-device speech recognition is available on the device.
+Whether the device supports on-device speech recognition.
 
 ```ts
 import { supportsOnDeviceRecognition } from "expo-speech-recognition";
@@ -882,7 +898,7 @@ const available = supportsOnDeviceRecognition();
 console.log("OnDevice recognition available:", available);
 ```
 
-### `supportsRecording(): boolean` (Android only)
+### `supportsRecording(): boolean`
 
 Whether audio recording is supported during speech recognition. This mostly applies to Android, where it checks that the device is running Android 13 or above.
@@ -902,10 +918,10 @@ You can see which locales are supported and installed on your device by running
 To download the offline model for a specific locale, use the `androidTriggerOfflineModelDownload` function.
 
 ```ts
-import { ExpoSpeechRecognitionModule } from "expo-speech-recognition";
+import { androidTriggerOfflineModelDownload } from "expo-speech-recognition";
 
 // Download the offline model for the specified locale
-ExpoSpeechRecognitionModule.androidTriggerOfflineModelDownload({
+androidTriggerOfflineModelDownload({
   locale: "en-US",
 })
   .then((result) => {
diff --git a/android/src/main/java/expo/modules/speechrecognition/ExpoSpeechRecognitionModule.kt b/android/src/main/java/expo/modules/speechrecognition/ExpoSpeechRecognitionModule.kt
index 9d04d1b..9d84cd1 100644
--- a/android/src/main/java/expo/modules/speechrecognition/ExpoSpeechRecognitionModule.kt
+++ b/android/src/main/java/expo/modules/speechrecognition/ExpoSpeechRecognitionModule.kt
@@ -183,6 +183,10 @@ class ExpoSpeechRecognitionModule : Module() {
       }
     }
 
+    Function("isRecognitionAvailable") {
+      SpeechRecognizer.isRecognitionAvailable(appContext.reactContext!!)
+    }
+
     Function("supportsRecording") {
       Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU
     }
diff --git a/example/App.tsx b/example/App.tsx
index 403432b..2436bf3 100644
--- a/example/App.tsx
+++ b/example/App.tsx
@@ -786,6 +786,15 @@ function OtherSettings(props: {
         }}
       />
+      <BigButton
+        title="isRecognitionAvailable()"
+        onPress={() => {
+          const isAvailable =
+            ExpoSpeechRecognitionModule.isRecognitionAvailable();
+          Alert.alert("isRecognitionAvailable()", isAvailable.toString());
+        }}
+      />
       {Platform.OS === "ios" && (
diff --git a/ios/ExpoSpeechRecognitionModule.swift b/ios/ExpoSpeechRecognitionModule.swift
--- a/ios/ExpoSpeechRecognitionModule.swift
+++ b/ios/ExpoSpeechRecognitionModule.swift
@@ ... @@
     Function("supportsOnDeviceRecognition") { () -> Bool in
-      let recognizer: SFSpeechRecognizer? = SFSpeechRecognizer()
+      return true
+    }
+
+    Function("isRecognitionAvailable") { () -> Bool in
+      let recognizer = SFSpeechRecognizer()
       return recognizer?.isAvailable ?? false
     }
diff --git a/ios/ExpoSpeechRecognizer.swift b/ios/ExpoSpeechRecognizer.swift
index 6f9bbb0..4853a07 100644
--- a/ios/ExpoSpeechRecognizer.swift
+++ b/ios/ExpoSpeechRecognizer.swift
@@ -23,7 +23,6 @@ enum RecognizerError: Error {
   }
 }
 
-/// A helper for transcribing speech to text using SFSpeechRecognizer and AVAudioEngine.
 actor ExpoSpeechRecognizer: ObservableObject {
   private var options: SpeechRecognitionOptions?
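For reference, a complete call to `androidTriggerOfflineModelDownload` (whose README example above is cut off at the hunk boundary) might look like the following sketch. The `status` values come from the signature listed in the table of contents; the error handling is illustrative:

```ts
import { androidTriggerOfflineModelDownload } from "expo-speech-recognition";

androidTriggerOfflineModelDownload({ locale: "en-US" })
  .then((result) => {
    // status: "opened_dialog" | "download_success" | "download_canceled"
    console.log("Model download:", result.status, result.message);
  })
  .catch((err) => {
    console.warn("Model download failed:", err);
  });
```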
diff --git a/package.json b/package.json
index cad5dd0..fbf5a0c 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "expo-speech-recognition",
-  "version": "0.2.20",
+  "version": "0.2.21",
   "description": "Speech Recognition for React Native Expo projects",
   "main": "build/index.js",
   "types": "build/index.d.ts",
diff --git a/src/ExpoSpeechRecognitionModule.ts b/src/ExpoSpeechRecognitionModule.ts
index 6dae12a..baf408d 100644
--- a/src/ExpoSpeechRecognitionModule.ts
+++ b/src/ExpoSpeechRecognitionModule.ts
@@ -31,6 +31,8 @@ export const ExpoSpeechRecognitionModule: ExpoSpeechRecognitionModuleType = {
     ExpoSpeechRecognitionNativeModule.supportsOnDeviceRecognition(),
   supportsRecording: () =>
     ExpoSpeechRecognitionNativeModule.supportsRecording(),
+  isRecognitionAvailable: () =>
+    ExpoSpeechRecognitionNativeModule.isRecognitionAvailable(),
 };
 
 export const ExpoSpeechRecognitionModuleEmitter = new EventEmitter(
diff --git a/src/ExpoSpeechRecognitionModule.types.ts b/src/ExpoSpeechRecognitionModule.types.ts
index d868de6..5d841aa 100644
--- a/src/ExpoSpeechRecognitionModule.types.ts
+++ b/src/ExpoSpeechRecognitionModule.types.ts
@@ -570,6 +570,13 @@ export interface ExpoSpeechRecognitionModuleType extends NativeModule {
   * This mostly applies to Android devices, to check if it's at least Android 13.
   */
  supportsRecording(): boolean;
+  /**
+   * Whether speech recognition is currently available on the device.
+   *
+   * If this method returns false, `start()` will fail and emit an error event with the code `service-not-allowed` or `language-not-supported`.
+   */
+  isRecognitionAvailable(): boolean;
+
  /**
   * Downloads the offline model for the specified locale.
   * Note: this is only supported on Android 13 and above.
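Note that `src/ExpoSpeechRecognitionModule.ts` (above) wires `isRecognitionAvailable` to the native module, and `src/index.ts` (below) re-exports it as a standalone function, so both import styles call the same native method. A minimal sketch:

```ts
import {
  ExpoSpeechRecognitionModule,
  isRecognitionAvailable,
} from "expo-speech-recognition";

// Both forms resolve to the same native call:
console.log(ExpoSpeechRecognitionModule.isRecognitionAvailable());
console.log(isRecognitionAvailable());
```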
Returning false.", - ); return false; }, supportsRecording: () => { @@ -137,6 +125,13 @@ export const ExpoSpeechRecognitionModule: ExpoSpeechRecognitionModuleType = { setAudioSessionActiveIOS: () => { console.warn("setAudioSessionActiveIOS is not supported on web."); }, + isRecognitionAvailable: () => { + const hasSpeechRecognitionAPI = + typeof webkitSpeechRecognition !== "undefined" || + typeof SpeechRecognition !== "undefined"; + + return hasSpeechRecognitionAPI; + }, }; /** @@ -176,6 +171,7 @@ const webToNativeEventMap: { start: (ev) => null, soundend: (ev) => null, }; + export const ExpoSpeechRecognitionModuleEmitter = { _nativeListeners: new Map() as Map void>>, _clientListeners: new Map() as Map< diff --git a/src/index.ts b/src/index.ts index cb01f7c..0ca17cc 100644 --- a/src/index.ts +++ b/src/index.ts @@ -51,6 +51,18 @@ export const getAudioSessionCategoryAndOptionsIOS = export const setAudioSessionActiveIOS = ExpoSpeechRecognitionModule.setAudioSessionActiveIOS; +export const androidTriggerOfflineModelDownload = + ExpoSpeechRecognitionModule.androidTriggerOfflineModelDownload; + +export const isRecognitionAvailable = + ExpoSpeechRecognitionModule.isRecognitionAvailable; + +export const getDefaultRecognitionService = + ExpoSpeechRecognitionModule.getDefaultRecognitionService; + +export const getAssistantService = + ExpoSpeechRecognitionModule.getAssistantService; + export const addSpeechRecognitionListener = < T extends keyof ExpoSpeechRecognitionNativeEventMap, >(