From 0c5be2182ab185a7d769796ad452cee15abc5c5d Mon Sep 17 00:00:00 2001 From: Daniel Zarifpour Date: Thu, 1 Jun 2023 00:23:28 -0400 Subject: [PATCH 1/5] feat: add black and ruff --- .gitignore | 4 +- AudioRecorder.py | 44 +- AudioTranscriber.py | 66 +- GPTResponder.py | 38 +- Makefile | 21 + README.md | 28 +- TranscriberModels.py | 28 +- custom_speech_recognition/__init__.py | 2049 ++++++++++++----- custom_speech_recognition/__main__.py | 17 +- custom_speech_recognition/audio.py | 211 +- .../recognizers/whisper.py | 11 +- main.py | 124 +- prompts.py | 10 +- pyproject.toml | 8 + requirements.txt | 3 + 15 files changed, 1853 insertions(+), 809 deletions(-) create mode 100644 Makefile create mode 100644 pyproject.toml diff --git a/.gitignore b/.gitignore index 5da200f..f1b6a50 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ __pycache__/ *.wav keys.py -.venv/ \ No newline at end of file +.venv/ +.DS_Store +ecout_env \ No newline at end of file diff --git a/AudioRecorder.py b/AudioRecorder.py index 810bf96..39cfada 100644 --- a/AudioRecorder.py +++ b/AudioRecorder.py @@ -1,11 +1,14 @@ -import custom_speech_recognition as sr -import pyaudiowpatch as pyaudio from datetime import datetime +import pyaudiowpatch as pyaudio + +import custom_speech_recognition as sr + RECORD_TIMEOUT = 3 ENERGY_THRESHOLD = 1000 DYNAMIC_ENERGY_THRESHOLD = False + class BaseRecorder: def __init__(self, source, source_name): self.recorder = sr.Recognizer() @@ -21,23 +24,31 @@ def adjust_for_noise(self, device_name, msg): print(f"[INFO] Completed ambient noise adjustment for {device_name}.") def record_into_queue(self, audio_queue): - def record_callback(_, audio:sr.AudioData) -> None: + def record_callback(_, audio: sr.AudioData) -> None: data = audio.get_raw_data() audio_queue.put((self.source_name, data, datetime.utcnow())) - self.recorder.listen_in_background(self.source, record_callback, phrase_time_limit=RECORD_TIMEOUT) + self.recorder.listen_in_background( + self.source, record_callback, phrase_time_limit=RECORD_TIMEOUT + ) + class DefaultMicRecorder(BaseRecorder): def __init__(self): super().__init__(source=sr.Microphone(sample_rate=16000), source_name="You") - self.adjust_for_noise("Default Mic", "Please make some noise from the Default Mic...") + self.adjust_for_noise( + "Default Mic", "Please make some noise from the Default Mic..." 
+ ) + class DefaultSpeakerRecorder(BaseRecorder): def __init__(self): with pyaudio.PyAudio() as p: wasapi_info = p.get_host_api_info_by_type(pyaudio.paWASAPI) - default_speakers = p.get_device_info_by_index(wasapi_info["defaultOutputDevice"]) - + default_speakers = p.get_device_info_by_index( + wasapi_info["defaultOutputDevice"] + ) + if not default_speakers["isLoopbackDevice"]: for loopback in p.get_loopback_device_info_generator(): if default_speakers["name"] in loopback["name"]: @@ -45,11 +56,16 @@ def __init__(self): break else: print("[ERROR] No loopback device found.") - - source = sr.Microphone(speaker=True, - device_index= default_speakers["index"], - sample_rate=int(default_speakers["defaultSampleRate"]), - chunk_size=pyaudio.get_sample_size(pyaudio.paInt16), - channels=default_speakers["maxInputChannels"]) + + source = sr.Microphone( + speaker=True, + device_index=default_speakers["index"], + sample_rate=int(default_speakers["defaultSampleRate"]), + chunk_size=pyaudio.get_sample_size(pyaudio.paInt16), + channels=default_speakers["maxInputChannels"], + ) super().__init__(source=source, source_name="Speaker") - self.adjust_for_noise("Default Speaker", "Please make or play some noise from the Default Speaker...") \ No newline at end of file + self.adjust_for_noise( + "Default Speaker", + "Please make or play some noise from the Default Speaker...", + ) diff --git a/AudioTranscriber.py b/AudioTranscriber.py index a29dc69..c46e62f 100644 --- a/AudioTranscriber.py +++ b/AudioTranscriber.py @@ -1,19 +1,20 @@ -import whisper -import torch -import wave +import io import os import threading -from tempfile import NamedTemporaryFile -import custom_speech_recognition as sr -import io +import wave from datetime import timedelta -import pyaudiowpatch as pyaudio from heapq import merge +from tempfile import NamedTemporaryFile + +import pyaudiowpatch as pyaudio + +import custom_speech_recognition as sr PHRASE_TIMEOUT = 3.05 MAX_PHRASES = 10 + class AudioTranscriber: def __init__(self, mic_source, speaker_source, model): self.transcript_data = {"You": [], "Speaker": []} @@ -24,20 +25,20 @@ def __init__(self, mic_source, speaker_source, model): "sample_rate": mic_source.SAMPLE_RATE, "sample_width": mic_source.SAMPLE_WIDTH, "channels": mic_source.channels, - "last_sample": bytes(), + "last_sample": b"", "last_spoken": None, "new_phrase": True, - "process_data_func": self.process_mic_data + "process_data_func": self.process_mic_data, }, "Speaker": { "sample_rate": speaker_source.SAMPLE_RATE, "sample_width": speaker_source.SAMPLE_WIDTH, "channels": speaker_source.channels, - "last_sample": bytes(), + "last_sample": b"", "last_spoken": None, "new_phrase": True, - "process_data_func": self.process_speaker_data - } + "process_data_func": self.process_speaker_data, + }, } def transcribe_audio_queue(self, audio_queue): @@ -46,7 +47,7 @@ def transcribe_audio_queue(self, audio_queue): self.update_last_sample_and_phrase_status(who_spoke, data, time_spoken) source_info = self.audio_sources[who_spoke] - text = '' + text = "" temp_file = NamedTemporaryFile(delete=False, suffix=".wav") source_info["process_data_func"](source_info["last_sample"], temp_file.name) text = self.audio_model.get_transcription(temp_file.name) @@ -54,29 +55,35 @@ def transcribe_audio_queue(self, audio_queue): temp_file.close() os.unlink(temp_file.name) - if text != '' and text.lower() != 'you': + if text != "" and text.lower() != "you": self.update_transcript(who_spoke, text, time_spoken) self.transcript_changed_event.set() def 
update_last_sample_and_phrase_status(self, who_spoke, data, time_spoken): source_info = self.audio_sources[who_spoke] - if source_info["last_spoken"] and time_spoken - source_info["last_spoken"] > timedelta(seconds=PHRASE_TIMEOUT): - source_info["last_sample"] = bytes() + if source_info["last_spoken"] and time_spoken - source_info[ + "last_spoken" + ] > timedelta(seconds=PHRASE_TIMEOUT): + source_info["last_sample"] = b"" source_info["new_phrase"] = True else: source_info["new_phrase"] = False source_info["last_sample"] += data - source_info["last_spoken"] = time_spoken + source_info["last_spoken"] = time_spoken def process_mic_data(self, data, temp_file_name): - audio_data = sr.AudioData(data, self.audio_sources["You"]["sample_rate"], self.audio_sources["You"]["sample_width"]) + audio_data = sr.AudioData( + data, + self.audio_sources["You"]["sample_rate"], + self.audio_sources["You"]["sample_width"], + ) wav_data = io.BytesIO(audio_data.get_wav_data()) - with open(temp_file_name, 'w+b') as f: + with open(temp_file_name, "w+b") as f: f.write(wav_data.read()) def process_speaker_data(self, data, temp_file_name): - with wave.open(temp_file_name, 'wb') as wf: + with wave.open(temp_file_name, "wb") as wf: wf.setnchannels(self.audio_sources["Speaker"]["channels"]) p = pyaudio.PyAudio() wf.setsampwidth(p.get_sample_size(pyaudio.paInt16)) @@ -95,18 +102,23 @@ def update_transcript(self, who_spoke, text, time_spoken): transcript[0] = (f"{who_spoke}: [{text}]\n\n", time_spoken) def get_transcript(self): - combined_transcript = list(merge( - self.transcript_data["You"], self.transcript_data["Speaker"], - key=lambda x: x[1], reverse=True)) + combined_transcript = list( + merge( + self.transcript_data["You"], + self.transcript_data["Speaker"], + key=lambda x: x[1], + reverse=True, + ) + ) combined_transcript = combined_transcript[:MAX_PHRASES] return "".join([t[0] for t in combined_transcript]) - + def clear_transcript_data(self): self.transcript_data["You"].clear() self.transcript_data["Speaker"].clear() - self.audio_sources["You"]["last_sample"] = bytes() - self.audio_sources["Speaker"]["last_sample"] = bytes() + self.audio_sources["You"]["last_sample"] = b"" + self.audio_sources["Speaker"]["last_sample"] = b"" self.audio_sources["You"]["new_phrase"] = True - self.audio_sources["Speaker"]["new_phrase"] = True \ No newline at end of file + self.audio_sources["Speaker"]["new_phrase"] = True diff --git a/GPTResponder.py b/GPTResponder.py index 2adab72..9527d2f 100644 --- a/GPTResponder.py +++ b/GPTResponder.py @@ -1,26 +1,30 @@ +import time + import openai from keys import OPENAI_API_KEY -from prompts import create_prompt, INITIAL_RESPONSE -import time + +from prompts import INITIAL_RESPONSE, create_prompt openai.api_key = OPENAI_API_KEY + def generate_response_from_transcript(transcript): try: response = openai.ChatCompletion.create( - model="gpt-3.5-turbo-0301", - messages=[{"role": "system", "content": create_prompt(transcript)}], - temperature = 0.0 + model="gpt-3.5-turbo-0301", + messages=[{"role": "system", "content": create_prompt(transcript)}], + temperature=0.0, ) except Exception as e: print(e) - return '' + return "" full_response = response.choices[0].message.content try: - return full_response.split('[')[1].split(']')[0] - except: - return '' - + return full_response.split("[")[1].split("]")[0] + except IndexError: + return "" + + class GPTResponder: def __init__(self): self.response = INITIAL_RESPONSE @@ -31,14 +35,16 @@ def respond_to_transcriber(self, transcriber): if 
transcriber.transcript_changed_event.is_set(): start_time = time.time() - transcriber.transcript_changed_event.clear() + transcriber.transcript_changed_event.clear() transcript_string = transcriber.get_transcript() response = generate_response_from_transcript(transcript_string) - + end_time = time.time() # Measure end time - execution_time = end_time - start_time # Calculate the time it took to execute the function - - if response != '': + + # Calculate the time it took to execute the function + execution_time = end_time - start_time + + if response != "": self.response = response remaining_time = self.response_interval - execution_time @@ -48,4 +54,4 @@ def respond_to_transcriber(self, transcriber): time.sleep(0.3) def update_response_interval(self, interval): - self.response_interval = interval \ No newline at end of file + self.response_interval = interval diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..4122c0b --- /dev/null +++ b/Makefile @@ -0,0 +1,21 @@ +lint: + @echo + ruff . + @echo + black --check --diff --color . + @echo + pip-audit + +format: + ruff --silent --exit-zero --fix . + black . + +precommit: + make lint + make format + +venv: + python3 -m venv ecout_env + +install: + pip install -r requirements.txt \ No newline at end of file diff --git a/README.md b/README.md index 6aed437..a502ba1 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ Ecoute is a live transcription tool that provides real-time transcripts for both ## 📖 Demo -https://github.com/SevaSk/ecoute/assets/50382291/8ac48927-8a26-49fd-80e9-48f980986208 + Ecoute is designed to help users in their conversations by providing live transcriptions and generating contextually relevant responses. By leveraging the power of OpenAI's GPT-3.5, Ecoute aims to make communication more efficient and enjoyable. @@ -15,21 +15,25 @@ Follow these steps to set up and run Ecoute on your local machine. ### 📋 Prerequisites -- Python 3.x +- Python >=3.8.0 - An OpenAI API key - Windows OS (Not tested on others) -- FFmpeg +- FFmpeg If FFmpeg is not installed in your system, you can follow the steps below to install it. First, you need to install Chocolatey, a package manager for Windows. Open your PowerShell as Administrator and run the following command: + ``` Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) ``` + Once Chocolatey is installed, you can install FFmpeg by running the following command in your PowerShell: + ``` choco install ffmpeg-full ``` + Please ensure that you run these commands in a PowerShell window with administrator privileges. If you face any issues during the installation, you can visit the official Chocolatey and FFmpeg websites for troubleshooting. ### 🔧 Installation @@ -51,7 +55,7 @@ Please ensure that you run these commands in a PowerShell window with administra ``` pip install -r requirements.txt ``` - + 4. Create a `keys.py` file in the ecoute directory and add your OpenAI API key: - Option 1: You can utilize a command on your command prompt. Run the following command, ensuring to replace "API KEY" with your actual OpenAI API key: @@ -61,10 +65,11 @@ Please ensure that you run these commands in a PowerShell window with administra ``` - Option 2: You can create the keys.py file manually. 
Open up your text editor of choice and enter the following content: - + ``` OPENAI_API_KEY="API KEY" ``` + Replace "API KEY" with your actual OpenAI API key. Save this file as keys.py within the ecoute directory. ### 🎬 Running Ecoute @@ -102,3 +107,16 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file ## 🤝 Contributing Contributions are welcome! Feel free to open issues or submit pull requests to improve Ecoute. + +### Installation + +1. `make venv` +2. Activate the venv: `ecout_venv` +3. `make install` + +### Code quality + +Before submitting a pull request run `make precommit` and resolve any issues. Additionally, here are some useful commands: + +- `make lint` +- `make format` diff --git a/TranscriberModels.py b/TranscriberModels.py index 60a3dd8..ec84e30 100644 --- a/TranscriberModels.py +++ b/TranscriberModels.py @@ -1,7 +1,9 @@ -import openai -import whisper import os + +import openai import torch +import whisper + def get_model(use_api): if use_api: @@ -9,26 +11,30 @@ def get_model(use_api): else: return WhisperTranscriber() + class WhisperTranscriber: def __init__(self): - self.audio_model = whisper.load_model(os.path.join(os.getcwd(), 'tiny.en.pt')) - print(f"[INFO] Whisper using GPU: " + str(torch.cuda.is_available())) + self.audio_model = whisper.load_model(os.path.join(os.getcwd(), "tiny.en.pt")) + print("[INFO] Whisper using GPU: " + str(torch.cuda.is_available())) def get_transcription(self, wav_file_path): try: - result = self.audio_model.transcribe(wav_file_path, fp16=torch.cuda.is_available()) + result = self.audio_model.transcribe( + wav_file_path, fp16=torch.cuda.is_available() + ) except Exception as e: print(e) - return '' - return result['text'].strip() - + return "" + return result["text"].strip() + + class APIWhisperTranscriber: def get_transcription(self, wav_file_path): - audio_file= open(wav_file_path, "rb") + audio_file = open(wav_file_path, "rb") try: result = openai.Audio.translate("whisper-1", audio_file) except Exception as e: print(e) - return '' + return "" - return result['text'].strip() \ No newline at end of file + return result["text"].strip() diff --git a/custom_speech_recognition/__init__.py b/custom_speech_recognition/__init__.py index 1d339b0..8af9146 100644 --- a/custom_speech_recognition/__init__.py +++ b/custom_speech_recognition/__init__.py @@ -1,24 +1,25 @@ #!/usr/bin/env python3 -"""Library for performing speech recognition, with support for several engines and APIs, online and offline.""" +"""Library for performing speech recognition, with support for several engines and APIs, +online and offline.""" -import io -import os -import tempfile -import sys -import subprocess -import wave import aifc -import math import audioop -import collections -import json import base64 -import threading +import collections import hashlib import hmac +import io +import json +import math +import os +import subprocess +import sys +import tempfile +import threading import time import uuid +import wave try: import requests @@ -29,71 +30,122 @@ __version__ = "3.10.0" __license__ = "BSD" +from urllib.error import HTTPError, URLError from urllib.parse import urlencode from urllib.request import Request, urlopen -from urllib.error import URLError, HTTPError from .audio import AudioData, get_flac_converter from .exceptions import ( RequestError, - TranscriptionFailed, + TranscriptionFailed, TranscriptionNotReady, UnknownValueError, WaitTimeoutError, ) from .recognizers import whisper +AUDIO_DATA_MUST_BE_AUDIO_DATA = "``audio_data`` must 
be audio data" +LANGUAGE_MUST_BE_STRING = "``language`` must be a string" +KEY_MUST_BE_STRING = "``key`` must be a string" + +NOT_IMPLEMENTED_ERROR_ABSTRACT_CLASS = "this is an abstract class" -class AudioSource(object): + +class AudioSource: def __init__(self): - raise NotImplementedError("this is an abstract class") + raise NotImplementedError(NOT_IMPLEMENTED_ERROR_ABSTRACT_CLASS) def __enter__(self): - raise NotImplementedError("this is an abstract class") + raise NotImplementedError(NOT_IMPLEMENTED_ERROR_ABSTRACT_CLASS) def __exit__(self, exc_type, exc_value, traceback): - raise NotImplementedError("this is an abstract class") + raise NotImplementedError(NOT_IMPLEMENTED_ERROR_ABSTRACT_CLASS) class Microphone(AudioSource): """ - Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``. + Creates a new ``Microphone`` instance, which represents a physical microphone on the + computer. Subclass of ``AudioSource``. - This will throw an ``AttributeError`` if you don't have PyAudio 0.2.11 or later installed. + This will throw an ``AttributeError`` if you don't have PyAudio 0.2.11 or later + installed. - If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source. Otherwise, ``device_index`` should be the index of the device to use for audio input. + If ``device_index`` is unspecified or ``None``, the default microphone is used as + the audio source. Otherwise, ``device_index`` should be the index of the device to + use for audio input. - A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an audio device such as a microphone or speaker. See the `PyAudio documentation `__ for more details. + A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` + (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an + audio device such as a microphone or speaker. See the `PyAudio documentation + `__ for more details. - The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of ``sample_rate`` samples per second (Hertz). If not specified, the value of ``sample_rate`` is determined automatically from the system's microphone settings. + The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of + ``sample_rate`` samples per second (Hertz). If not specified, the value of + ``sample_rate`` is determined automatically from the system's microphone settings. - Higher ``sample_rate`` values result in better audio quality, but also more bandwidth (and therefore, slower recognition). Additionally, some CPUs, such as those in older Raspberry Pi models, can't keep up if this value is too high. + Higher ``sample_rate`` values result in better audio quality, but also more + bandwidth (and therefore, slower recognition). Additionally, some CPUs, such as + those in older Raspberry Pi models, can't keep up if this value is too high. - Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient noise, but also makes detection less sensitive. This value, generally, should be left at its default. + Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient + noise, but also makes detection less sensitive. This value, generally, should be + left at its default. 
""" - def __init__(self, device_index=None, sample_rate=None, chunk_size=1024, speaker=False, channels = 1): - assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer" - assert sample_rate is None or (isinstance(sample_rate, int) and sample_rate > 0), "Sample rate must be None or a positive integer" - assert isinstance(chunk_size, int) and chunk_size > 0, "Chunk size must be a positive integer" + + def __init__( + self, + device_index=None, + sample_rate=None, + chunk_size=1024, + speaker=False, + channels=1, + ): + assert device_index is None or isinstance( + device_index, int + ), "Device index must be None or an integer" + assert sample_rate is None or ( + isinstance(sample_rate, int) and sample_rate > 0 + ), "Sample rate must be None or a positive integer" + assert ( + isinstance(chunk_size, int) and chunk_size > 0 + ), "Chunk size must be a positive integer" # set up PyAudio - self.speaker=speaker + self.speaker = speaker self.pyaudio_module = self.get_pyaudio() audio = self.pyaudio_module.PyAudio() try: count = audio.get_device_count() # obtain device count - if device_index is not None: # ensure device index is in range - assert 0 <= device_index < count, "Device index out of range ({} devices available; device index should be between 0 and {} inclusive)".format(count, count - 1) - if sample_rate is None: # automatically set the sample rate to the hardware's default sample rate if not specified - device_info = audio.get_device_info_by_index(device_index) if device_index is not None else audio.get_default_input_device_info() - assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info) + + # ensure device index is in range + if device_index is not None: + assert 0 <= device_index < count, ( + "Device index out of range ({} devices available; " + "device index should be between 0 and {} inclusive)" + ).format(count, count - 1) + + # automatically set the sample rate to the hardware's default sample rate if + # not specified + if sample_rate is None: + device_info = ( + audio.get_device_info_by_index(device_index) + if device_index is not None + else audio.get_default_input_device_info() + ) + assert ( + isinstance(device_info.get("defaultSampleRate"), float | int) + and device_info["defaultSampleRate"] > 0 + ), f"Invalid device info returned from PyAudio: {device_info}" sample_rate = int(device_info["defaultSampleRate"]) finally: audio.terminate() self.device_index = device_index self.format = self.pyaudio_module.paInt16 # 16-bit int sampling - self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) # size of each sample + + # size of each sample + self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) + self.SAMPLE_RATE = sample_rate # sampling rate in Hertz self.CHUNK = chunk_size # number of frames stored in each buffer self.channels = channels @@ -104,23 +156,32 @@ def __init__(self, device_index=None, sample_rate=None, chunk_size=1024, speaker @staticmethod def get_pyaudio(): """ - Imports the pyaudio module and checks its version. Throws exceptions if pyaudio can't be found or a wrong version is installed + Imports the pyaudio module and checks its version. 
Throws exceptions if pyaudio + can't be found or a wrong version is installed """ try: import pyaudiowpatch as pyaudio except ImportError: raise AttributeError("Could not find PyAudio; check installation") from distutils.version import LooseVersion + if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.11"): - raise AttributeError("PyAudio 0.2.11 or later is required (found version {})".format(pyaudio.__version__)) + raise AttributeError( + "PyAudio 0.2.11 or later is required (found version {})".format( + pyaudio.__version__ + ) + ) return pyaudio @staticmethod def list_microphone_names(): """ - Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead. + Returns a list of the names of all available microphones. For microphones where + the name can't be retrieved, the list entry contains ``None`` instead. - The index of each microphone's name in the returned list is the same as its device index when creating a ``Microphone`` instance - if you want to use the microphone at index 3 in the returned list, use ``Microphone(device_index=3)``. + The index of each microphone's name in the returned list is the same as its + device index when creating a ``Microphone`` instance - if you want to use the + microphone at index 3 in the returned list, use ``Microphone(device_index=3)``. """ audio = Microphone.get_pyaudio().PyAudio() try: @@ -135,9 +196,15 @@ def list_microphone_names(): @staticmethod def list_working_microphones(): """ - Returns a dictionary mapping device indices to microphone names, for microphones that are currently hearing sounds. When using this function, ensure that your microphone is unmuted and make some noise at it to ensure it will be detected as working. - - Each key in the returned dictionary can be passed to the ``Microphone`` constructor to use that microphone. For example, if the return value is ``{3: "HDA Intel PCH: ALC3232 Analog (hw:1,0)"}``, you can do ``Microphone(device_index=3)`` to use that microphone. + Returns a dictionary mapping device indices to microphone names, for microphones + that are currently hearing sounds. When using this function, ensure that your + microphone is unmuted and make some noise at it to ensure it will be detected as + working. + + Each key in the returned dictionary can be passed to the ``Microphone`` + constructor to use that microphone. For example, if the return value is + ``{3: "HDA Intel PCH: ALC3232 Analog (hw:1,0)"}``, you can do + ``Microphone(device_index=3)`` to use that microphone. 
""" pyaudio_module = Microphone.get_pyaudio() audio = pyaudio_module.PyAudio() @@ -146,25 +213,37 @@ def list_working_microphones(): for device_index in range(audio.get_device_count()): device_info = audio.get_device_info_by_index(device_index) device_name = device_info.get("name") - assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info) + assert ( + isinstance(device_info.get("defaultSampleRate"), float | int) + and device_info["defaultSampleRate"] > 0 + ), f"Invalid device info returned from PyAudio: {device_info}" try: # read audio pyaudio_stream = audio.open( - input_device_index=device_index, channels=1, format=pyaudio_module.paInt16, - rate=int(device_info["defaultSampleRate"]), input=True + input_device_index=device_index, + channels=1, + format=pyaudio_module.paInt16, + rate=int(device_info["defaultSampleRate"]), + input=True, ) try: - buffer = pyaudio_stream.read(1024) - if not pyaudio_stream.is_stopped(): pyaudio_stream.stop_stream() + audio_buffer = pyaudio_stream.read(1024) + if not pyaudio_stream.is_stopped(): + pyaudio_stream.stop_stream() finally: pyaudio_stream.close() except Exception: continue # compute RMS of debiased audio - energy = -audioop.rms(buffer, 2) + energy = -audioop.rms(audio_buffer, 2) energy_bytes = bytes([energy & 0xFF, (energy >> 8) & 0xFF]) - debiased_energy = audioop.rms(audioop.add(buffer, energy_bytes * (len(buffer) // 2), 2), 2) + debiased_energy = audioop.rms( + audioop.add( + audio_buffer, energy_bytes * (len(audio_buffer) // 2), 2 + ), + 2, + ) if debiased_energy > 30: # probably actually audio result[device_index] = device_name @@ -173,7 +252,9 @@ def list_working_microphones(): return result def __enter__(self): - assert self.stream is None, "This audio source is already inside a context manager" + assert ( + self.stream is None + ), "This audio source is already inside a context manager" self.audio = self.pyaudio_module.PyAudio() try: @@ -186,14 +267,18 @@ def __enter__(self): format=self.format, rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK, - input=True + input=True, ) ) else: self.stream = Microphone.MicrophoneStream( self.audio.open( - input_device_index=self.device_index, channels=1, format=self.format, - rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK, input=True, + input_device_index=self.device_index, + channels=1, + format=self.format, + rate=self.SAMPLE_RATE, + frames_per_buffer=self.CHUNK, + input=True, ) ) except Exception: @@ -207,7 +292,7 @@ def __exit__(self, exc_type, exc_value, traceback): self.stream = None self.audio.terminate() - class MicrophoneStream(object): + class MicrophoneStream: def __init__(self, pyaudio_stream): self.pyaudio_stream = pyaudio_stream @@ -216,7 +301,8 @@ def read(self, size): def close(self): try: - # sometimes, if the stream isn't stopped, closing the stream throws an exception + # sometimes, if the stream isn't stopped, + # closing the stream throws an exception if not self.pyaudio_stream.is_stopped(): self.pyaudio_stream.stop_stream() finally: @@ -225,21 +311,33 @@ def close(self): class AudioFile(AudioSource): """ - Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file ``filename_or_fileobject``. Subclass of ``AudioSource``. + Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file + ``filename_or_fileobject``. Subclass of ``AudioSource``. 
- If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar. + If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an + audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a + file-like object such as ``io.BytesIO`` or similar. - Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context. + Note that functions that read from the audio (such as ``recognizer_instance.record`` + or ``recognizer_instance.listen``) will move ahead in the stream. For example, if + you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, + the first time it will return the first 10 seconds of audio, and the second time it + will return the 10 seconds of audio right after that. This is always reset to the + beginning when entering an ``AudioFile`` context. - WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour. + WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are + not supported and may result in undefined behaviour. Both AIFF and AIFF-C (compressed AIFF) formats are supported. - FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour. + FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result + in undefined behaviour. 
""" def __init__(self, filename_or_fileobject): - assert isinstance(filename_or_fileobject, (type(""), type(u""))) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object" + assert isinstance(filename_or_fileobject, str) or hasattr( + filename_or_fileobject, "read" + ), "Given audio file must be a filename string or a file-like object" self.filename_or_fileobject = filename_or_fileobject self.stream = None self.DURATION = None @@ -251,11 +349,15 @@ def __init__(self, filename_or_fileobject): self.FRAME_COUNT = None def __enter__(self): - assert self.stream is None, "This audio source is already inside a context manager" + assert ( + self.stream is None + ), "This audio source is already inside a context manager" try: # attempt to read the file as WAV self.audio_reader = wave.open(self.filename_or_fileobject, "rb") - self.little_endian = True # RIFF WAV is a little-endian format (most ``audioop`` operations assume that the frames are stored in little-endian form) + self.little_endian = True # RIFF WAV is a little-endian format + # (most ``audioop`` operations assume that the frames are stored in + # little-endian form) except (wave.Error, EOFError): try: # attempt to read the file as AIFF @@ -266,102 +368,187 @@ def __enter__(self): if hasattr(self.filename_or_fileobject, "read"): flac_data = self.filename_or_fileobject.read() else: - with open(self.filename_or_fileobject, "rb") as f: flac_data = f.read() + with open(self.filename_or_fileobject, "rb") as f: + flac_data = f.read() # run the FLAC converter with the FLAC data to get the AIFF data flac_converter = get_flac_converter() - if os.name == "nt": # on Windows, specify that the process is to be started without showing a console window + # on Windows, specify that the process is to be started without showing + # a console window + if os.name == "nt": startup_info = subprocess.STARTUPINFO() - startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW # specify that the wShowWindow field of `startup_info` contains a value - startup_info.wShowWindow = subprocess.SW_HIDE # specify that the console window should be hidden + # specify that the wShowWindow field of + # `startup_info` contains a value + startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW + # specify that the console window should be hidden + startup_info.wShowWindow = subprocess.SW_HIDE else: startup_info = None # default startupinfo - process = subprocess.Popen([ - flac_converter, - "--stdout", "--totally-silent", # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output - "--decode", "--force-aiff-format", # decode the FLAC file into an AIFF file - "-", # the input FLAC file contents will be given in stdin - ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, startupinfo=startup_info) + process = subprocess.Popen( + [ + flac_converter, + "--stdout", + # put the resulting AIFF file in stdout, + # and make sure it's not mixed with any program output + "--totally-silent", + "--decode", + "--force-aiff-format", # decode the FLAC file into an AIFF file + "-", # the input FLAC file contents will be given in stdin + ], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + startupinfo=startup_info, + ) aiff_data, _ = process.communicate(flac_data) aiff_file = io.BytesIO(aiff_data) try: self.audio_reader = aifc.open(aiff_file, "rb") except (aifc.Error, EOFError): - raise ValueError("Audio file could not be read as PCM WAV, AIFF/AIFF-C, or Native FLAC; check if file is corrupted or in another 
format") + raise ValueError( + "Audio file could not be read as PCM WAV, AIFF/AIFF-C, or " + "Native FLAC; check if file is corrupted or in another format" + ) self.little_endian = False # AIFF is a big-endian format - assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo" + assert ( + 1 <= self.audio_reader.getnchannels() <= 2 + ), "Audio must be mono or stereo" self.SAMPLE_WIDTH = self.audio_reader.getsampwidth() - # 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866) + # 24-bit audio needs some special handling for old Python versions (workaround + # for https://bugs.python.org/issue12866) samples_24_bit_pretending_to_be_32_bit = False if self.SAMPLE_WIDTH == 3: # 24-bit audio - try: audioop.bias(b"", self.SAMPLE_WIDTH, 0) # test whether this sample width is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do) - except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less) - samples_24_bit_pretending_to_be_32_bit = True # while the ``AudioFile`` instance will outwardly appear to be 32-bit, it will actually internally be 24-bit - self.SAMPLE_WIDTH = 4 # the ``AudioFile`` instance should present itself as a 32-bit stream now, since we'll be converting into 32-bit on the fly when reading + try: + # test whether this sample width is supported (for example, ``audioop`` + # in Python 3.3 and below don't support sample width 3, + # while Python 3.4+ do) + audioop.bias(b"", self.SAMPLE_WIDTH, 0) + + # this version of audioop doesn't support + # 24-bit audio (probably Python 3.3 or less) + except audioop.error: + # while the ``AudioFile`` instance will outwardly appear to be 32-bit, + # it will actually internally be 24-bit + samples_24_bit_pretending_to_be_32_bit = True + # the ``AudioFile`` instance should present itself as a 32-bit stream + # now, since we'll be converting into 32-bit on the fly when reading + self.SAMPLE_WIDTH = 4 self.SAMPLE_RATE = self.audio_reader.getframerate() self.CHUNK = 4096 self.FRAME_COUNT = self.audio_reader.getnframes() self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE) - self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit) + self.stream = AudioFile.AudioFileStream( + self.audio_reader, + self.little_endian, + samples_24_bit_pretending_to_be_32_bit, + ) return self def __exit__(self, exc_type, exc_value, traceback): - if not hasattr(self.filename_or_fileobject, "read"): # only close the file if it was opened by this class in the first place (if the file was originally given as a path) + # only close the file if it was opened by this class in the first place + # (if the file was originally given as a path) + if not hasattr(self.filename_or_fileobject, "read"): self.audio_reader.close() self.stream = None self.DURATION = None - class AudioFileStream(object): - def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit): - self.audio_reader = audio_reader # an audio file object (e.g., a `wave.Wave_read` instance) - self.little_endian = little_endian # whether the audio data is little-endian (when working with big-endian things, we'll have to convert it to little-endian before we process it) - self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit # this is true if the audio is 24-bit audio, but 24-bit audio isn't supported, so we have to 
pretend that this is 32-bit audio and convert it on the fly + class AudioFileStream: + def __init__( + self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit + ): + # an audio file object (e.g., a `wave.Wave_read` instance) + self.audio_reader = audio_reader + # whether the audio data is little-endian (when working with big-endian + # things, we'll have to convert it to little-endian before we process it) + self.little_endian = little_endian + # this is true if the audio is 24-bit audio, but 24-bit audio isn't + # supported, so we have to pretend that this is 32-bit audio and convert it + # on the fly + self.samples_24_bit_pretending_to_be_32_bit = ( + samples_24_bit_pretending_to_be_32_bit + ) def read(self, size=-1): - buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size) - if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608 + audio_buffer = self.audio_reader.readframes( + self.audio_reader.getnframes() if size == -1 else size + ) + if not isinstance(audio_buffer, bytes): + audio_buffer = b"" # workaround for https://bugs.python.org/issue24608 sample_width = self.audio_reader.getsampwidth() - if not self.little_endian: # big endian format, convert to little endian on the fly - if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality) - buffer = audioop.byteswap(buffer, sample_width) - else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback - buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width)) + # big endian format, convert to little endian on the fly + if not self.little_endian: + # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that + # also means that we don't need to worry about 24-bit audio being + # unsupported, since Python 3.4+ always has that functionality) + if hasattr(audioop, "byteswap"): + audio_buffer = audioop.byteswap(audio_buffer, sample_width) + + # manually reverse the bytes of each sample, which is slower but works + # well enough as a fallback + else: + audio_buffer = audio_buffer[sample_width - 1 :: -1] + b"".join( + audio_buffer[i + sample_width : i : -1] + for i in range( + sample_width - 1, len(audio_buffer), sample_width + ) + ) # workaround for https://bugs.python.org/issue12866 - if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions - buffer = b"".join(b"\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample - sample_width = 4 # make sure we thread the buffer as 32-bit audio now, after converting it from 24-bit audio + # we need to convert samples from 24-bit to 32-bit before we can process + # them with ``audioop`` functions + if self.samples_24_bit_pretending_to_be_32_bit: + # since we're in little endian, we prepend a zero byte to each 24-bit + # sample to get a 32-bit sample + audio_buffer = b"".join( + b"\x00" + audio_buffer[i : i + sample_width] + for i in range(0, len(audio_buffer), sample_width) + ) + # make sure we thread the buffer as 32-bit audio now, after converting + # it from 24-bit audio + 
sample_width = 4 if self.audio_reader.getnchannels() != 1: # stereo audio - buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono - return buffer + # convert stereo audio data to mono + audio_buffer = audioop.tomono(audio_buffer, sample_width, 1, 1) + return audio_buffer class Recognizer(AudioSource): def __init__(self): """ - Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality. + Creates a new ``Recognizer`` instance, which represents a collection of speech + recognition functionality. """ self.energy_threshold = 300 # minimum audio energy to consider for recording self.dynamic_energy_threshold = True self.dynamic_energy_adjustment_damping = 0.15 self.dynamic_energy_ratio = 1.5 - self.pause_threshold = 0.8 # seconds of non-speaking audio before a phrase is considered complete - self.operation_timeout = None # seconds after an internal operation (e.g., an API request) starts before it times out, or ``None`` for no timeout - - self.phrase_threshold = 0.3 # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops) - self.non_speaking_duration = 0.5 # seconds of non-speaking audio to keep on both sides of the recording + # seconds of non-speaking audio before a phrase is considered complete + self.pause_threshold = 0.8 + # seconds after an internal operation (e.g., an API request) starts before it + # times out, or ``None`` for no timeout + self.operation_timeout = None + # minimum seconds of speaking audio before we consider the speaking audio a + # phrase - values below this are ignored (for filtering out clicks and pops) + self.phrase_threshold = 0.3 + # seconds of non-speaking audio to keep on both sides of the recording + self.non_speaking_duration = 0.5 def record(self, source, duration=None, offset=None): """ - Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns. + Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` + instance) starting at ``offset`` (or at the beginning if not specified) into an + ``AudioData`` instance, which it returns. - If ``duration`` is not specified, then it will record until there is no more audio input. + If ``duration`` is not specified, then it will record until there is no more + audio input. """ assert isinstance(source, AudioSource), "Source must be an audio source" - assert source.stream is not None, "Audio source must be entered before recording, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" + assert source.stream is not None, ( + "Audio source must be entered before recording, see documentation for " + "``AudioSource``; are you using ``source`` outside of a ``with`` statement?" 
+ ) frames = io.BytesIO() seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE @@ -374,14 +561,16 @@ def record(self, source, duration=None, offset=None): if offset_time > offset: offset_reached = True - buffer = source.stream.read(source.CHUNK) - if len(buffer) == 0: break + audio_buffer = source.stream.read(source.CHUNK) + if len(audio_buffer) == 0: + break if offset_reached or not offset: elapsed_time += seconds_per_buffer - if duration and elapsed_time > duration: break + if duration and elapsed_time > duration: + break - frames.write(buffer) + frames.write(audio_buffer) frame_data = frames.getvalue() frames.close() @@ -389,14 +578,22 @@ def record(self, source, duration=None, offset=None): def adjust_for_ambient_noise(self, source, duration=1): """ - Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise. + Adjusts the energy threshold dynamically using audio from ``source`` (an + ``AudioSource`` instance) to account for ambient noise. - Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected. + Intended to calibrate the energy threshold with the ambient energy level. + Should be used on periods of audio without speech - will stop early if any + speech is detected. - The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise. + The ``duration`` parameter is the maximum number of seconds that it will + dynamically adjust the threshold for before returning. This value should be at + least 0.5 in order to get a representative sample of the ambient noise. """ assert isinstance(source, AudioSource), "Source must be an audio source" - assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" + assert source.stream is not None, ( + "Audio source must be entered before adjusting, see documentation for " + "``AudioSource``; are you using ``source`` outside of a ``with`` statement?" 
+ ) assert self.pause_threshold >= self.non_speaking_duration >= 0 seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE @@ -405,27 +602,40 @@ def adjust_for_ambient_noise(self, source, duration=1): # adjust energy threshold until a phrase starts while True: elapsed_time += seconds_per_buffer - if elapsed_time > duration: break - buffer = source.stream.read(source.CHUNK) - energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal + if elapsed_time > duration: + break + audio_buffer = source.stream.read(source.CHUNK) + + # energy of the audio signal + energy = audioop.rms(audio_buffer, source.SAMPLE_WIDTH) # dynamically adjust the energy threshold using asymmetric weighted average - damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates + # account for different chunk sizes and rates + damping = self.dynamic_energy_adjustment_damping**seconds_per_buffer target_energy = energy * self.dynamic_energy_ratio - self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping) + self.energy_threshold = self.energy_threshold * damping + target_energy * ( + 1 - damping + ) - def snowboy_wait_for_hot_word(self, snowboy_location, snowboy_hot_word_files, source, timeout=None): + def snowboy_wait_for_hot_word( + self, snowboy_location, snowboy_hot_word_files, source, timeout=None + ): # load snowboy library (NOT THREAD SAFE) sys.path.append(snowboy_location) import snowboydetect + sys.path.pop() detector = snowboydetect.SnowboyDetect( - resource_filename=os.path.join(snowboy_location, "resources", "common.res").encode(), - model_str=",".join(snowboy_hot_word_files).encode() + resource_filename=os.path.join( + snowboy_location, "resources", "common.res" + ).encode(), + model_str=",".join(snowboy_hot_word_files).encode(), ) detector.SetAudioGain(1.0) - detector.SetSensitivity(",".join(["0.4"] * len(snowboy_hot_word_files)).encode()) + detector.SetSensitivity( + ",".join(["0.4"] * len(snowboy_hot_word_files)).encode() + ) snowboy_sample_rate = detector.SampleRate() elapsed_time = 0 @@ -434,65 +644,128 @@ def snowboy_wait_for_hot_word(self, snowboy_location, snowboy_hot_word_files, so # buffers capable of holding 5 seconds of original audio five_seconds_buffer_count = int(math.ceil(5 / seconds_per_buffer)) + # buffers capable of holding 0.5 seconds of resampled audio half_second_buffer_count = int(math.ceil(0.5 / seconds_per_buffer)) + frames = collections.deque(maxlen=five_seconds_buffer_count) resampled_frames = collections.deque(maxlen=half_second_buffer_count) + # snowboy check interval check_interval = 0.05 last_check = time.time() while True: elapsed_time += seconds_per_buffer if timeout and elapsed_time > timeout: - raise WaitTimeoutError("listening timed out while waiting for hotword to be said") + raise WaitTimeoutError( + "listening timed out while waiting for hotword to be said" + ) - buffer = source.stream.read(source.CHUNK) - if len(buffer) == 0: break # reached end of the stream - frames.append(buffer) + audio_buffer = source.stream.read(source.CHUNK) + if len(audio_buffer) == 0: + break # reached end of the stream + frames.append(audio_buffer) # resample audio to the required sample rate - resampled_buffer, resampling_state = audioop.ratecv(buffer, source.SAMPLE_WIDTH, 1, source.SAMPLE_RATE, snowboy_sample_rate, resampling_state) + resampled_buffer, resampling_state = audioop.ratecv( + audio_buffer, + source.SAMPLE_WIDTH, + 1, + source.SAMPLE_RATE, + snowboy_sample_rate, + 
resampling_state, + ) resampled_frames.append(resampled_buffer) if time.time() - last_check > check_interval: # run Snowboy on the resampled audio snowboy_result = detector.RunDetection(b"".join(resampled_frames)) - assert snowboy_result != -1, "Error initializing streams or reading audio data" - if snowboy_result > 0: break # wake word found + assert ( + snowboy_result != -1 + ), "Error initializing streams or reading audio data" + if snowboy_result > 0: + break # wake word found resampled_frames.clear() last_check = time.time() return b"".join(frames), elapsed_time - def listen(self, source, timeout=None, phrase_time_limit=None, snowboy_configuration=None): + def listen( + self, source, timeout=None, phrase_time_limit=None, snowboy_configuration=None + ): """ - Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns. - - This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included. - - The ``timeout`` parameter is the maximum number of seconds that this will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, there will be no wait timeout. - - The ``phrase_time_limit`` parameter is the maximum number of seconds that this will allow a phrase to continue before stopping and returning the part of the phrase processed before the time limit was reached. The resulting audio will be the phrase cut off at the time limit. If ``phrase_timeout`` is ``None``, there will be no phrase time limit. - - The ``snowboy_configuration`` parameter allows integration with `Snowboy `__, an offline, high-accuracy, power-efficient hotword recognition engine. When used, this function will pause until Snowboy detects a hotword, after which it will unpause. This parameter should either be ``None`` to turn off Snowboy support, or a tuple of the form ``(SNOWBOY_LOCATION, LIST_OF_HOT_WORD_FILES)``, where ``SNOWBOY_LOCATION`` is the path to the Snowboy root directory, and ``LIST_OF_HOT_WORD_FILES`` is a list of paths to Snowboy hotword configuration files (`*.pmdl` or `*.umdl` format). - - This operation will always complete within ``timeout + phrase_timeout`` seconds if both are numbers, either by returning the audio data, or by raising a ``speech_recognition.WaitTimeoutError`` exception. + Records a single phrase from ``source`` (an ``AudioSource`` instance) into an + ``AudioData`` instance, which it returns. + + This is done by waiting until the audio has an energy above + ``recognizer_instance.energy_threshold`` (the user has started speaking), and + then recording until it encounters ``recognizer_instance.pause_threshold`` + seconds of non-speaking or there is no more audio input. The ending silence is + not included. + + The ``timeout`` parameter is the maximum number of seconds that this will wait + for a phrase to start before giving up and throwing an + ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, + there will be no wait timeout. + + The ``phrase_time_limit`` parameter is the maximum number of seconds that this + will allow a phrase to continue before stopping and returning the part of the + phrase processed before the time limit was reached. 
The resulting audio will be + the phrase cut off at the time limit. If ``phrase_timeout`` is ``None``, there + will be no phrase time limit. + + The ``snowboy_configuration`` parameter allows integration with + `Snowboy `__, an offline, high-accuracy, + power-efficient hotword recognition engine. When used, this function will pause + until Snowboy detects a hotword, after which it will unpause. This parameter + should either be ``None`` to turn off Snowboy support, or a tuple of the form + ``(SNOWBOY_LOCATION, LIST_OF_HOT_WORD_FILES)``, where ``SNOWBOY_LOCATION`` is + the path to the Snowboy root directory, and ``LIST_OF_HOT_WORD_FILES`` is a list + of paths to Snowboy hotword configuration files (`*.pmdl` or `*.umdl` format). + + This operation will always complete within ``timeout + phrase_timeout`` seconds + if both are numbers, either by returning the audio data, or by raising a + ``speech_recognition.WaitTimeoutError`` exception. """ assert isinstance(source, AudioSource), "Source must be an audio source" - assert source.stream is not None, "Audio source must be entered before listening, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" + assert source.stream is not None, ( + "Audio source must be entered before listening, see documentation for " + "``AudioSource``; are you using ``source`` outside of a ``with`` statement?" + ) assert self.pause_threshold >= self.non_speaking_duration >= 0 if snowboy_configuration is not None: - assert os.path.isfile(os.path.join(snowboy_configuration[0], "snowboydetect.py")), "``snowboy_configuration[0]`` must be a Snowboy root directory containing ``snowboydetect.py``" + assert os.path.isfile( + os.path.join(snowboy_configuration[0], "snowboydetect.py") + ), ( + "``snowboy_configuration[0]`` must be a Snowboy root directory " + "containing ``snowboydetect.py``" + ) for hot_word_file in snowboy_configuration[1]: - assert os.path.isfile(hot_word_file), "``snowboy_configuration[1]`` must be a list of Snowboy hot word configuration files" + assert os.path.isfile(hot_word_file), ( + "``snowboy_configuration[1]`` must be a list of Snowboy hot word " + "configuration files" + ) seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE - pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio during a phrase, before the phrase should be considered complete - phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase - non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after a phrase + + # number of buffers of non-speaking audio during a phrase, before the phrase + # should be considered complete + pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) + + # minimum number of buffers of speaking audio + # before we consider the speaking audio a phrase + phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) + + # maximum number of buffers of non-speaking audio + # to retain before and after a phrase + non_speaking_buffer_count = int( + math.ceil(self.non_speaking_duration / seconds_per_buffer) + ) # read audio input for phrases until there is a phrase that is long enough elapsed_time = 0 # number of seconds of audio read - buffer = b"" # an empty buffer means that the 
stream has ended and there is no data left to read + # an empty buffer means that the stream has ended + # and there is no data left to read + audio_buffer = b"" while True: frames = collections.deque() @@ -502,30 +775,48 @@ def listen(self, source, timeout=None, phrase_time_limit=None, snowboy_configura # handle waiting too long for phrase by raising an exception elapsed_time += seconds_per_buffer if timeout and elapsed_time > timeout: - raise WaitTimeoutError("listening timed out while waiting for phrase to start") + raise WaitTimeoutError( + "listening timed out while waiting for phrase to start" + ) + + audio_buffer = source.stream.read(source.CHUNK) + if len(audio_buffer) == 0: + break # reached end of the stream + frames.append(audio_buffer) - buffer = source.stream.read(source.CHUNK) - if len(buffer) == 0: break # reached end of the stream - frames.append(buffer) - if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers + # ensure we only keep the needed amount of non-speaking buffers + if len(frames) > non_speaking_buffer_count: frames.popleft() # detect whether speaking has started on audio input - energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal - if energy > self.energy_threshold: break - # dynamically adjust the energy threshold using asymmetric weighted average + # energy of the audio signal + energy = audioop.rms(audio_buffer, source.SAMPLE_WIDTH) + if energy > self.energy_threshold: + break + + # dynamically adjust the energy threshold using asymmetric + # weighted average if self.dynamic_energy_threshold: - damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates + # account for different chunk sizes and rates + damping = ( + self.dynamic_energy_adjustment_damping**seconds_per_buffer + ) target_energy = energy * self.dynamic_energy_ratio - self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping) + self.energy_threshold = ( + self.energy_threshold * damping + + target_energy * (1 - damping) + ) else: # read audio input until the hotword is said snowboy_location, snowboy_hot_word_files = snowboy_configuration - buffer, delta_time = self.snowboy_wait_for_hot_word(snowboy_location, snowboy_hot_word_files, source, timeout) + audio_buffer, delta_time = self.snowboy_wait_for_hot_word( + snowboy_location, snowboy_hot_word_files, source, timeout + ) elapsed_time += delta_time - if len(buffer) == 0: break # reached end of the stream - frames.append(buffer) + if len(audio_buffer) == 0: + break # reached end of the stream + frames.append(audio_buffer) # read audio input until the phrase ends pause_count, phrase_count = 0, 0 @@ -533,16 +824,23 @@ def listen(self, source, timeout=None, phrase_time_limit=None, snowboy_configura while True: # handle phrase being too long by cutting off the audio elapsed_time += seconds_per_buffer - if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit: + if ( + phrase_time_limit + and elapsed_time - phrase_start_time > phrase_time_limit + ): break - buffer = source.stream.read(source.CHUNK) - if len(buffer) == 0: break # reached end of the stream - frames.append(buffer) + audio_buffer = source.stream.read(source.CHUNK) + if len(audio_buffer) == 0: + break # reached end of the stream + frames.append(audio_buffer) phrase_count += 1 - # check if speaking has stopped for longer than the pause threshold on the audio input - energy = audioop.rms(buffer, 
source.SAMPLE_WIDTH) # unit energy of the audio signal within the buffer + # check if speaking has stopped for longer than the pause threshold + # on the audio input + + # unit energy of the audio signal within the buffer + energy = audioop.rms(audio_buffer, source.SAMPLE_WIDTH) if energy > self.energy_threshold: pause_count = 0 else: @@ -550,25 +848,49 @@ def listen(self, source, timeout=None, phrase_time_limit=None, snowboy_configura if pause_count > pause_buffer_count: # end of the phrase break - # check how long the detected phrase is, and retry listening if the phrase is too short - phrase_count -= pause_count # exclude the buffers for the pause before the phrase - if phrase_count >= phrase_buffer_count or len(buffer) == 0: break # phrase is long enough or we've reached the end of the stream, so stop listening + # check how long the detected phrase is, + # and retry listening if the phrase is too short + + # exclude the buffers for the pause before the phrase + phrase_count -= pause_count + + if phrase_count >= phrase_buffer_count or len(audio_buffer) == 0: + # phrase is long enough or we've reached the end of the stream, + # so stop listening + break # obtain frame data - for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end + for _ in range(pause_count - non_speaking_buffer_count): + frames.pop() # remove extra non-speaking frames at the end frame_data = b"".join(frames) return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH) def listen_in_background(self, source, callback, phrase_time_limit=None): """ - Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase are detected. - - Returns a function object that, when called, requests that the background listener thread stop. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads. The function accepts one parameter, ``wait_for_stop``: if truthy, the function will wait for the background listener to stop before returning, otherwise it will return immediately and the background listener thread might still be running for a second or two afterwards. Additionally, if you are using a truthy value for ``wait_for_stop``, you must call the function from the same thread you originally called ``listen_in_background`` from. - - Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter works in the same way as the ``phrase_time_limit`` parameter for ``recognizer_instance.listen(source)``, as well. - - The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that ``callback`` function will be called from a non-main thread. + Spawns a thread to repeatedly record phrases from ``source`` (an + ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` + with that ``AudioData`` instance as soon as each phrase are detected. + + Returns a function object that, when called, requests that the background + listener thread stop. The background thread is a daemon and will not stop the + program from exiting if there are no other non-daemon threads. 
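A rough illustration of the background-listening API (``sr`` and ``r`` as in the earlier sketch; ``on_phrase`` is a hypothetical callback name):

    def on_phrase(recognizer, audio):
        # called from the listener thread with each detected phrase
        print(f"captured {len(audio.get_raw_data())} bytes")

    stop_listening = r.listen_in_background(sr.Microphone(), on_phrase, phrase_time_limit=3)
    # later: ask the daemon thread to stop and wait for it to finish
    stop_listening(wait_for_stop=True)
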
The function + accepts one parameter, ``wait_for_stop``: if truthy, the function will wait for + the background listener to stop before returning, otherwise it will return + immediately and the background listener thread might still be running for a + second or two afterwards. Additionally, if you are using a truthy value for + ``wait_for_stop``, you must call the function from the same thread you + originally called ``listen_in_background`` from. + + Phrase recognition uses the exact same mechanism as + ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter + works in the same way as the ``phrase_time_limit`` parameter for + ``recognizer_instance.listen(source)``, as well. + + The ``callback`` parameter is a function that should accept two parameters - + the ``recognizer_instance``, and an ``AudioData`` instance representing the + captured audio. Note that ``callback`` function will be called from a non-main + thread. """ assert isinstance(source, AudioSource), "Source must be an audio source" running = [True] @@ -576,95 +898,199 @@ def listen_in_background(self, source, callback, phrase_time_limit=None): def threaded_listen(): with source as s: while running[0]: - try: # listen for 1 second, then check again if the stop function has been called + # listen for 1 second, then check again + # if the stop function has been called + try: audio = self.listen(s, 1, phrase_time_limit) - except WaitTimeoutError: # listening timed out, just try again + + # listening timed out, just try again + except WaitTimeoutError: pass else: - if running[0]: callback(self, audio) + if running[0]: + callback(self, audio) def stopper(wait_for_stop=True): running[0] = False if wait_for_stop: - listener_thread.join() # block until the background thread is done, which can take around 1 second + # block until the background thread is done, + # which can take around 1 second + listener_thread.join() listener_thread = threading.Thread(target=threaded_listen) listener_thread.daemon = True listener_thread.start() return stopper - def recognize_sphinx(self, audio_data, language="en-US", keyword_entries=None, grammar=None, show_all=False): + def recognize_sphinx( + self, + audio_data, + language="en-US", + keyword_entries=None, + grammar=None, + show_all=False, + ): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx. - - The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using `PocketSphinx `__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``. The ``language`` parameter can also be a tuple of filesystem paths, of the form ``(acoustic_parameters_directory, language_model_file, phoneme_dictionary_file)`` - this allows you to load arbitrary Sphinx models. - - If specified, the keywords to search for are determined by ``keyword_entries``, an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very sensitive, more false positives) inclusive. If not specified or ``None``, no keywords are used and Sphinx will simply transcribe whatever words it recognizes. 
Specifying ``keyword_entries`` is more accurate than just looking for those same keywords in non-keyword-based transcriptions, because Sphinx knows specifically what sounds to look for. - - Sphinx can also handle FSG or JSGF grammars. The parameter ``grammar`` expects a path to the grammar file. Note that if a JSGF grammar is passed, an FSG grammar will be created at the same location to speed up execution in the next run. If ``keyword_entries`` are passed, content of ``grammar`` will be ignored. - - Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition. - - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using CMU Sphinx. + + The recognition language is determined by ``language``, an RFC5646 language tag + like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only + ``en-US`` is supported. See `Notes on using `PocketSphinx + `__ + for information about installing other languages. This document is also + included under ``reference/pocketsphinx.rst``. The ``language`` parameter can + also be a tuple of filesystem paths, of the form + ``(acoustic_parameters_directory, language_model_file, + phoneme_dictionary_file)`` - this allows you to load arbitrary Sphinx models. + + If specified, the keywords to search for are determined by ``keyword_entries``, + an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` + is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer + should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very + sensitive, more false positives) inclusive. If not specified or ``None``, no + keywords are used and Sphinx will simply transcribe whatever words it + recognizes. Specifying ``keyword_entries`` is more accurate than just looking + for those same keywords in non-keyword-based transcriptions, because Sphinx + knows specifically what sounds to look for. + + Sphinx can also handle FSG or JSGF grammars. The parameter ``grammar`` expects + a path to the grammar file. Note that if a JSGF grammar is passed, an FSG + grammar will be created at the same location to speed up execution in the next + run. If ``keyword_entries`` are passed, content of ``grammar`` will be ignored. + + Returns the most likely transcription if ``show_all`` is false (the default). + Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object + resulting from the recognition. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is + unintelligible. Raises a ``speech_recognition.RequestError`` exception if there + are any issues with the Sphinx installation. 
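A hedged sketch of the offline Sphinx path, covering plain transcription and the keyword-spotting mode just described (the phrases and sensitivities below are illustrative):

    text = r.recognize_sphinx(audio)  # default bundled en-US model
    hits = r.recognize_sphinx(audio, keyword_entries=[("hello", 0.8), ("stop", 1.0)])
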
""" - assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" - assert isinstance(language, str) or (isinstance(language, tuple) and len(language) == 3), "``language`` must be a string or 3-tuple of Sphinx data file paths of the form ``(acoustic_parameters, language_model, phoneme_dictionary)``" - assert keyword_entries is None or all(isinstance(keyword, (type(""), type(u""))) and 0 <= sensitivity <= 1 for keyword, sensitivity in keyword_entries), "``keyword_entries`` must be ``None`` or a list of pairs of strings and numbers between 0 and 1" + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert isinstance(language, str) or ( + isinstance(language, tuple) and len(language) == 3 + ), ( + "``language`` must be a string or 3-tuple of Sphinx data file paths of the " + "form ``(acoustic_parameters, language_model, phoneme_dictionary)``" + ) + assert keyword_entries is None or all( + isinstance(keyword, str) and 0 <= sensitivity <= 1 + for keyword, sensitivity in keyword_entries + ), ( + "``keyword_entries`` must be ``None`` or a list of pairs of strings and " + "numbers between 0 and 1" + ) # import the PocketSphinx speech recognition module try: - from pocketsphinx import pocketsphinx, Jsgf, FsgModel + from pocketsphinx import FsgModel, Jsgf, pocketsphinx except ImportError: - raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.") + raise RequestError( + "missing PocketSphinx module: ensure that PocketSphinx is set " + "up correctly." + ) except ValueError: - raise RequestError("bad PocketSphinx installation; try reinstalling PocketSphinx version 0.0.9 or better.") - if not hasattr(pocketsphinx, "Decoder") or not hasattr(pocketsphinx.Decoder, "default_config"): - raise RequestError("outdated PocketSphinx installation; ensure you have PocketSphinx version 0.0.9 or better.") + raise RequestError( + "bad PocketSphinx installation; try reinstalling PocketSphinx " + "version 0.0.9 or better." + ) + if not hasattr(pocketsphinx, "Decoder") or not hasattr( + pocketsphinx.Decoder, "default_config" + ): + raise RequestError( + "outdated PocketSphinx installation; ensure you have PocketSphinx " + "version 0.0.9 or better." 
+ ) if isinstance(language, str): # directory containing language data - language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language) + language_directory = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + "pocketsphinx-data", + language, + ) if not os.path.isdir(language_directory): - raise RequestError("missing PocketSphinx language data directory: \"{}\"".format(language_directory)) - acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model") - language_model_file = os.path.join(language_directory, "language-model.lm.bin") - phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict") + raise RequestError( + 'missing PocketSphinx language data directory: "{}"'.format( + language_directory + ) + ) + acoustic_parameters_directory = os.path.join( + language_directory, "acoustic-model" + ) + language_model_file = os.path.join( + language_directory, "language-model.lm.bin" + ) + phoneme_dictionary_file = os.path.join( + language_directory, "pronounciation-dictionary.dict" + ) else: # 3-tuple of Sphinx data file paths - acoustic_parameters_directory, language_model_file, phoneme_dictionary_file = language + ( + acoustic_parameters_directory, + language_model_file, + phoneme_dictionary_file, + ) = language if not os.path.isdir(acoustic_parameters_directory): - raise RequestError("missing PocketSphinx language model parameters directory: \"{}\"".format(acoustic_parameters_directory)) + raise RequestError( + 'missing PocketSphinx language model parameters directory: "{}"'.format( + acoustic_parameters_directory + ) + ) if not os.path.isfile(language_model_file): - raise RequestError("missing PocketSphinx language model file: \"{}\"".format(language_model_file)) + raise RequestError( + 'missing PocketSphinx language model file: "{}"'.format( + language_model_file + ) + ) if not os.path.isfile(phoneme_dictionary_file): - raise RequestError("missing PocketSphinx phoneme dictionary file: \"{}\"".format(phoneme_dictionary_file)) + raise RequestError( + 'missing PocketSphinx phoneme dictionary file: "{}"'.format( + phoneme_dictionary_file + ) + ) # create decoder object config = pocketsphinx.Decoder.default_config() - config.set_string("-hmm", acoustic_parameters_directory) # set the path of the hidden Markov model (HMM) parameter files + + # set the path of the hidden Markov model (HMM) parameter files + config.set_string("-hmm", acoustic_parameters_directory) + config.set_string("-lm", language_model_file) config.set_string("-dict", phoneme_dictionary_file) - config.set_string("-logfn", os.devnull) # disable logging (logging causes unwanted output in terminal) + + # disable logging (logging causes unwanted output in terminal) + config.set_string("-logfn", os.devnull) + decoder = pocketsphinx.Decoder(config) # obtain audio data - raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) # the included language models require audio to be 16-bit mono 16 kHz in little-endian format + + # the included language models require audio to be + # 16-bit mono 16 kHz in little-endian format + raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) # obtain recognition results if keyword_entries is not None: # explicitly specified set of keywords with PortableNamedTemporaryFile("w") as f: - # generate a keywords file - Sphinx documentation recommendeds sensitivities between 1e-50 and 1e-5 - f.writelines("{} /1e{}/\n".format(keyword, 100 * sensitivity - 110) for 
keyword, sensitivity in keyword_entries) + # generate a keywords file - Sphinx documentation recommendeds + # sensitivities between 1e-50 and 1e-5 + f.writelines( + "{} /1e{}/\n".format(keyword, 100 * sensitivity - 110) + for keyword, sensitivity in keyword_entries + ) f.flush() - # perform the speech recognition with the keywords file (this is inside the context manager so the file isn;t deleted until we're done) + # perform the speech recognition with the keywords file (this is inside + # the context manager so the file isn;t deleted until we're done) decoder.set_kws("keywords", f.name) decoder.set_search("keywords") elif grammar is not None: # a path to a FSG or JSGF grammar if not os.path.exists(grammar): - raise ValueError("Grammar '{0}' does not exist.".format(grammar)) + raise ValueError(f"Grammar '{grammar}' does not exist.") grammar_path = os.path.abspath(os.path.dirname(grammar)) grammar_name = os.path.splitext(os.path.basename(grammar))[0] - fsg_path = "{0}/{1}.fsg".format(grammar_path, grammar_name) + fsg_path = f"{grammar_path}/{grammar_name}.fsg" if not os.path.exists(fsg_path): # create FSG grammar if not available jsgf = Jsgf(grammar) rule = jsgf.get_rule("{0}.{0}".format(grammar_name)) @@ -676,113 +1102,195 @@ def recognize_sphinx(self, audio_data, language="en-US", keyword_entries=None, g decoder.set_search(grammar_name) decoder.start_utt() # begin utterance processing - decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True) + + # process audio data with recognition enabled (no_search = False), as a full + # utterance (full_utt = True) + decoder.process_raw(raw_data, False, True) + decoder.end_utt() # stop utterance processing - if show_all: return decoder + if show_all: + return decoder - # return results hypothesis = decoder.hyp() - if hypothesis is not None: return hypothesis.hypstr + if hypothesis is not None: + return hypothesis.hypstr raise UnknownValueError() # no transcriptions available - def recognize_google(self, audio_data, key=None, language="en-US", pfilter=0, show_all=False, with_confidence=False): + def recognize_google( + self, + audio_data, + key=None, + language="en-US", + pfilter=0, + show_all=False, + with_confidence=False, + ): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API. - - The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**. - - To obtain your own API key, simply following the steps on the `API Keys `__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API". - - The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language tags can be found in this `StackOverflow answer `__. - - The profanity filter level can be adjusted with ``pfilter``: 0 - No filter, 1 - Only shows the first character and replaces the rest with asterisks. The default is level 0. - - Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary. 
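By way of example, the Google endpoint can be exercised as below; the built-in key is used when ``key`` is omitted, so treat this as a best-effort sketch:

    text = r.recognize_google(audio, language="en-US")
    text, confidence = r.recognize_google(audio, with_confidence=True)
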
- - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using the Google Speech Recognition API. + + The Google Speech Recognition API key is specified by ``key``. If not specified, + it uses a generic key that works out of the box. This should generally be used + for personal or testing purposes only, as it **may be revoked by Google at any + time**. + + To obtain your own API key, simply following the steps on the `API Keys + `__ page at the Chromium + Developers site. In the Google Developers Console, Google Speech Recognition is + listed as "Speech API". + + The recognition language is determined by ``language``, an RFC5646 language tag + like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting + to US English. A list of supported language tags can be found in this + `StackOverflow answer `__. + + The profanity filter level can be adjusted with ``pfilter``: 0 - No filter, 1 - + Only shows the first character and replaces the rest with asterisks. + The default is level 0. + + Returns the most likely transcription if ``show_all`` is false (the default). + Otherwise, returns the raw API response as a JSON dictionary. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is + unintelligible. Raises a ``speech_recognition.RequestError`` exception if the + speech recognition operation failed, if the key isn't valid, or if there is no + internet connection. """ - assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" - assert key is None or isinstance(key, str), "``key`` must be ``None`` or a string" - assert isinstance(language, str), "``language`` must be a string" + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert key is None or isinstance( + key, str + ), "``key`` must be ``None`` or a string" + assert isinstance(language, str), LANGUAGE_MUST_BE_STRING flac_data = audio_data.get_flac_data( - convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz - convert_width=2 # audio samples must be 16-bit + convert_rate=None + if audio_data.sample_rate >= 8000 + else 8000, # audio samples must be at least 8 kHz + convert_width=2, # audio samples must be 16-bit + ) + if key is None: + key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw" + url = "http://www.google.com/speech-api/v2/recognize?{}".format( + urlencode( + {"client": "chromium", "lang": language, "key": key, "pFilter": pfilter} + ) + ) + request = Request( + url, + data=flac_data, + headers={"Content-Type": f"audio/x-flac; rate={audio_data.sample_rate}"}, ) - if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw" - url = "http://www.google.com/speech-api/v2/recognize?{}".format(urlencode({ - "client": "chromium", - "lang": language, - "key": key, - "pFilter": pfilter - })) - request = Request(url, data=flac_data, headers={"Content-Type": "audio/x-flac; rate={}".format(audio_data.sample_rate)}) # obtain audio transcription results try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError("recognition request failed: {}".format(e.reason)) + raise RequestError(f"recognition request failed: {e.reason}") except URLError as e: - raise RequestError("recognition 
connection failed: {}".format(e.reason)) + raise RequestError(f"recognition connection failed: {e.reason}") response_text = response.read().decode("utf-8") # ignore any blank blocks actual_result = [] for line in response_text.split("\n"): - if not line: continue + if not line: + continue result = json.loads(line)["result"] if len(result) != 0: actual_result = result[0] break - # return results if show_all: return actual_result - if not isinstance(actual_result, dict) or len(actual_result.get("alternative", [])) == 0: raise UnknownValueError() + if ( + not isinstance(actual_result, dict) + or len(actual_result.get("alternative", [])) == 0 + ): + raise UnknownValueError() if "confidence" in actual_result["alternative"]: # return alternative with highest confidence score - best_hypothesis = max(actual_result["alternative"], key=lambda alternative: alternative["confidence"]) + best_hypothesis = max( + actual_result["alternative"], + key=lambda alternative: alternative["confidence"], + ) else: - # when there is no confidence available, we arbitrarily choose the first hypothesis. + # when there is no confidence available, + # we arbitrarily choose the first hypothesis. best_hypothesis = actual_result["alternative"][0] - if "transcript" not in best_hypothesis: raise UnknownValueError() + if "transcript" not in best_hypothesis: + raise UnknownValueError() # https://cloud.google.com/speech-to-text/docs/basics#confidence-values - # "Your code should not require the confidence field as it is not guaranteed to be accurate, or even set, in any of the results." + # "Your code should not require the confidence field as it is not guaranteed + # to be accurate, or even set, in any of the results." confidence = best_hypothesis.get("confidence", 0.5) if with_confidence: return best_hypothesis["transcript"], confidence return best_hypothesis["transcript"] - def recognize_google_cloud(self, audio_data, credentials_json=None, language="en-US", preferred_phrases=None, show_all=False): + def recognize_google_cloud( + self, + audio_data, + credentials_json=None, + language="en-US", + preferred_phrases=None, + show_all=False, + ): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Cloud Speech API. - - This function requires a Google Cloud Platform account; see the `Google Cloud Speech API Quickstart `__ for details and instructions. Basically, create a project, enable billing for the project, enable the Google Cloud Speech API for the project, and set up Service Account Key credentials for the project. The result is a JSON file containing the API credentials. The text content of this JSON file is specified by ``credentials_json``. If not specified, the library will try to automatically `find the default API credentials JSON file `__. - - The recognition language is determined by ``language``, which is a BCP-47 language tag like ``"en-US"`` (US English). A list of supported language tags can be found in the `Google Cloud Speech API documentation `__. - - If ``preferred_phrases`` is an iterable of phrase strings, those given phrases will be more likely to be recognized over similar-sounding alternatives. This is useful for things like keyword/command recognition or adding new phrases that aren't in Google's vocabulary. Note that the API imposes certain `restrictions on the list of phrase strings `__. - - Returns the most likely transcription if ``show_all`` is False (the default). Otherwise, returns the raw API response as a JSON dictionary. 
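Assuming valid Cloud credentials (either ``GOOGLE_APPLICATION_CREDENTIALS`` in the environment or a service-account JSON string passed explicitly), a sketch of the Cloud Speech call might look like:

    # credentials_json holds the text of a service-account key file (placeholder)
    text = r.recognize_google_cloud(
        audio,
        credentials_json=credentials_json,
        language="en-US",
        preferred_phrases=["wake word"],  # illustrative phrase hints
    )
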
- - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the credentials aren't valid, or if there is no Internet connection. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using the Google Cloud Speech API. + + This function requires a Google Cloud Platform account; see the + `Google Cloud Speech API Quickstart + `__ + for details and instructions. Basically, create a project, enable billing for + the project, enable the Google Cloud Speech API for the project, and set up + Service Account Key credentials for the project. The result is a JSON file + containing the API credentials. The text content of this JSON file is specified + by ``credentials_json``. If not specified, the library will try to + automatically `find the default API credentials JSON file + `__. + + The recognition language is determined by ``language``, which is a BCP-47 + language tag like ``"en-US"`` (US English). A list of supported language tags + can be found in the `Google Cloud Speech API documentation `__. + + If ``preferred_phrases`` is an iterable of phrase strings, those given phrases + will be more likely to be recognized over similar-sounding alternatives. + This is useful for things like keyword/command recognition or adding new + phrases that aren't in Google's vocabulary. Note that the API imposes certain + `restrictions on the list of phrase strings + `__. + + Returns the most likely transcription if ``show_all`` is False (the default). + Otherwise, returns the raw API response as a JSON dictionary. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is + unintelligible. Raises a ``speech_recognition.RequestError`` exception if the + speech recognition operation failed, if the credentials aren't valid, or if + there is no Internet connection. """ - assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA if credentials_json is None: - assert os.environ.get('GOOGLE_APPLICATION_CREDENTIALS') is not None - assert isinstance(language, str), "``language`` must be a string" - assert preferred_phrases is None or all(isinstance(preferred_phrases, (type(""), type(u""))) for preferred_phrases in preferred_phrases), "``preferred_phrases`` must be a list of strings" + assert os.environ.get("GOOGLE_APPLICATION_CREDENTIALS") is not None + assert isinstance(language, str), LANGUAGE_MUST_BE_STRING + assert preferred_phrases is None or all( + isinstance(preferred_phrases, str) + for preferred_phrases in preferred_phrases + ), "``preferred_phrases`` must be a list of strings" try: import socket - from google.cloud import speech + from google.api_core.exceptions import GoogleAPICallError + from google.cloud import speech except ImportError: - raise RequestError('missing google-cloud-speech module: ensure that google-cloud-speech is set up correctly.') + raise RequestError( + "missing google-cloud-speech module: ensure that " + "google-cloud-speech is set up correctly." 
+ ) if credentials_json is not None: client = speech.SpeechClient.from_service_account_json(credentials_json) @@ -790,26 +1298,30 @@ def recognize_google_cloud(self, audio_data, credentials_json=None, language="en client = speech.SpeechClient() flac_data = audio_data.get_flac_data( - convert_rate=None if 8000 <= audio_data.sample_rate <= 48000 else max(8000, min(audio_data.sample_rate, 48000)), # audio sample rate must be between 8 kHz and 48 kHz inclusive - clamp sample rate into this range - convert_width=2 # audio samples must be 16-bit + convert_rate=None + # audio sample rate must be between + # 8 kHz and 48 kHz inclusive - clamp sample rate into this range + if 8000 <= audio_data.sample_rate <= 48000 + else max(8000, min(audio_data.sample_rate, 48000)), + convert_width=2, # audio samples must be 16-bit ) audio = speech.RecognitionAudio(content=flac_data) config = { - 'encoding': speech.RecognitionConfig.AudioEncoding.FLAC, - 'sample_rate_hertz': audio_data.sample_rate, - 'language_code': language + "encoding": speech.RecognitionConfig.AudioEncoding.FLAC, + "sample_rate_hertz": audio_data.sample_rate, + "language_code": language, } if preferred_phrases is not None: - config['speechContexts'] = [speech.SpeechContext( - phrases=preferred_phrases - )] + config["speechContexts"] = [speech.SpeechContext(phrases=preferred_phrases)] if show_all: - config['enableWordTimeOffsets'] = True # some useful extra options for when we want all the output + config[ + "enableWordTimeOffsets" + ] = True # some useful extra options for when we want all the output opts = {} if self.operation_timeout and socket.getdefaulttimeout() is None: - opts['timeout'] = self.operation_timeout + opts["timeout"] = self.operation_timeout config = speech.RecognitionConfig(**config) @@ -818,292 +1330,468 @@ def recognize_google_cloud(self, audio_data, credentials_json=None, language="en except GoogleAPICallError as e: raise RequestError(e) except URLError as e: - raise RequestError("recognition connection failed: {0}".format(e.reason)) + raise RequestError(f"recognition connection failed: {e.reason}") - if show_all: return response - if len(response.results) == 0: raise UnknownValueError() + if show_all: + return response + if len(response.results) == 0: + raise UnknownValueError() - transcript = '' + transcript = "" for result in response.results: - transcript += result.alternatives[0].transcript.strip() + ' ' + transcript += result.alternatives[0].transcript.strip() + " " return transcript def recognize_wit(self, audio_data, key, show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using the Wit.ai API. - The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter. + The Wit.ai API key is specified by ``key``. Unfortunately, these are not + available without `signing up for an account `__ and creating + an app. You will need to add at least one intent to the app before you can see + the API key, though the actual intent settings don't matter. 
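A one-line sketch, with ``WIT_AI_KEY`` standing in for the 32-character token described here:

    text = r.recognize_wit(audio, key=WIT_AI_KEY)
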
- To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings. + To get the API key for a Wit.ai app, go to the app's overview page, go to the + section titled "Make an API request", and look for something along the lines of + ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; + ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` + is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings. The recognition language is configured in the Wit.ai app settings. - Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. + Returns the most likely transcription if ``show_all`` is false (the default). + Otherwise, returns the `raw API response + `__ as a JSON + dictionary. - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is + unintelligible. Raises a ``speech_recognition.RequestError`` exception if the + speech recognition operation failed, if the key isn't valid, or if there is no + internet connection. """ - assert isinstance(audio_data, AudioData), "Data must be audio data" - assert isinstance(key, str), "``key`` must be a string" + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert isinstance(key, str), KEY_MUST_BE_STRING wav_data = audio_data.get_wav_data( - convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz - convert_width=2 # audio samples should be 16-bit + convert_rate=None + if audio_data.sample_rate >= 8000 + else 8000, # audio samples must be at least 8 kHz + convert_width=2, # audio samples should be 16-bit ) url = "https://api.wit.ai/speech?v=20170307" - request = Request(url, data=wav_data, headers={"Authorization": "Bearer {}".format(key), "Content-Type": "audio/wav"}) + request = Request( + url, + data=wav_data, + headers={ + "Authorization": f"Bearer {key}", + "Content-Type": "audio/wav", + }, + ) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError("recognition request failed: {}".format(e.reason)) + raise RequestError(f"recognition request failed: {e.reason}") except URLError as e: - raise RequestError("recognition connection failed: {}".format(e.reason)) + raise RequestError(f"recognition connection failed: {e.reason}") response_text = response.read().decode("utf-8") result = json.loads(response_text) - # return results - if show_all: return result - if "_text" not in result or result["_text"] is None: raise UnknownValueError() + if show_all: + return result + if "_text" not in result or result["_text"] is None: + raise UnknownValueError() return result["_text"] - def recognize_azure(self, audio_data, key, language="en-US", profanity="masked", location="westus", show_all=False): + def recognize_azure( + self, + audio_data, + key, + language="en-US", + profanity="masked", + location="westus", + show_all=False, + ): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), 
using the Microsoft Azure Speech API. - - The Microsoft Azure Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ with Microsoft Azure. - - To get the API key, go to the `Microsoft Azure Portal Resources `__ page, go to "All Resources" > "Add" > "See All" > Search "Speech > "Create", and fill in the form to make a "Speech" resource. On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Azure Speech API keys are 32-character lowercase hexadecimal strings. - - The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation `__ under "Interactive and dictation mode". - - Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. - - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using the Microsoft Azure Speech API. + + The Microsoft Azure Speech API key is specified by ``key``. Unfortunately, + these are not available without `signing up for an account + `__ + with Microsoft Azure. + + To get the API key, go to the `Microsoft Azure Portal Resources + `__ page, go to "All Resources" > "Add" > "See All" + > Search "Speech > "Create", and fill in the form to make a "Speech" resource. + On the resulting page (which is also accessible from the "All Resources" page + in the Azure Portal), go to the "Show Access Keys" page, which will have two + API keys, either of which can be used for the `key` parameter. Microsoft Azure + Speech API keys are 32-character lowercase hexadecimal strings. + + The recognition language is determined by ``language``, a BCP-47 language tag + like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting + to US English. A list of supported language values can be found in the + `API documentation `__ + under "Interactive and dictation mode". + + Returns the most likely transcription if ``show_all`` is false (the default). + Otherwise, returns the `raw API response + `__ + as a JSON dictionary. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is + unintelligible. Raises a ``speech_recognition.RequestError`` exception if the + speech recognition operation failed, if the key isn't valid, or if there is no + internet connection. 
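Note that this backend returns a ``(text, confidence)`` pair rather than a bare string; a sketch with a placeholder key:

    text, confidence = r.recognize_azure(audio, key=AZURE_SPEECH_KEY, location="westus", language="en-US")
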
""" - assert isinstance(audio_data, AudioData), "Data must be audio data" - assert isinstance(key, str), "``key`` must be a string" - # assert isinstance(result_format, str), "``format`` must be a string" # simple|detailed - assert isinstance(language, str), "``language`` must be a string" - - result_format = 'detailed' - access_token, expire_time = getattr(self, "azure_cached_access_token", None), getattr(self, "azure_cached_access_token_expiry", None) + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert isinstance(key, str), KEY_MUST_BE_STRING + # simple|detailed + # assert isinstance(result_format, str), "``format`` must be a string" + assert isinstance(language, str), LANGUAGE_MUST_BE_STRING + + result_format = "detailed" + access_token, expire_time = getattr( + self, "azure_cached_access_token", None + ), getattr(self, "azure_cached_access_token_expiry", None) allow_caching = True try: - from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+ + # we need monotonic time to avoid being affected by system clock changes, + # but this is only available in Python 3.3+ + from time import ( + monotonic, + ) except ImportError: - expire_time = None # monotonic time not available, don't cache access tokens - allow_caching = False # don't allow caching, since monotonic time isn't available - if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired + # monotonic time not available, don't cache access tokens + expire_time = None + # don't allow caching, since monotonic time isn't available + allow_caching = False + + # caching not enabled, first credential request, + # or the access token from the previous one expired + if expire_time is None or monotonic() > expire_time: # get an access token using OAuth - credential_url = "https://" + location + ".api.cognitive.microsoft.com/sts/v1.0/issueToken" - credential_request = Request(credential_url, data=b"", headers={ - "Content-type": "application/x-www-form-urlencoded", - "Content-Length": "0", - "Ocp-Apim-Subscription-Key": key, - }) + credential_url = ( + "https://" + + location + + ".api.cognitive.microsoft.com/sts/v1.0/issueToken" + ) + credential_request = Request( + credential_url, + data=b"", + headers={ + "Content-type": "application/x-www-form-urlencoded", + "Content-Length": "0", + "Ocp-Apim-Subscription-Key": key, + }, + ) if allow_caching: start_time = monotonic() try: - credential_response = urlopen(credential_request, timeout=60) # credential response can take longer, use longer timeout instead of default one + # credential response can take longer, + # use longer timeout instead of default one + credential_response = urlopen(credential_request, timeout=60) except HTTPError as e: - raise RequestError("credential request failed: {}".format(e.reason)) + raise RequestError(f"credential request failed: {e.reason}") except URLError as e: - raise RequestError("credential connection failed: {}".format(e.reason)) + raise RequestError(f"credential connection failed: {e.reason}") access_token = credential_response.read().decode("utf-8") if allow_caching: # save the token for the duration it is valid for self.azure_cached_access_token = access_token - self.azure_cached_access_token_expiry = start_time + 600 # according to https://docs.microsoft.com/en-us/azure/cognitive-services/Speech-Service/rest-apis#authentication, the token expires in 
exactly 10 minutes + # according to https://docs.microsoft.com/en-us/azure/cognitive-services/Speech-Service/rest-apis#authentication, + # the token expires in exactly 10 minutes + self.azure_cached_access_token_expiry = start_time + 600 wav_data = audio_data.get_wav_data( convert_rate=16000, # audio samples must be 8kHz or 16 kHz - convert_width=2 # audio samples should be 16-bit + convert_width=2, # audio samples should be 16-bit ) - url = "https://" + location + ".stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?{}".format(urlencode({ - "language": language, - "format": result_format, - "profanity": profanity - })) + url = ( + "https://" + + location + + ( + ".stt.speech.microsoft.com/speech/recognition/conversation/" + "cognitiveservices/v1?{}" + ).format( + urlencode( + { + "language": language, + "format": result_format, + "profanity": profanity, + } + ) + ) + ) - if sys.version_info >= (3, 6): # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible - request = Request(url, data=io.BytesIO(wav_data), headers={ - "Authorization": "Bearer {}".format(access_token), - "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", - "Transfer-Encoding": "chunked", - }) - else: # fall back on manually formatting the POST body as a chunked request - ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8") - chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n" - request = Request(url, data=chunked_transfer_encoding_data, headers={ - "Authorization": "Bearer {}".format(access_token), - "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", + # chunked-transfer requests are only supported in the standard library + # as of Python 3.6+, use it if possible + request = Request( + url, + data=io.BytesIO(wav_data), + headers={ + "Authorization": f"Bearer {access_token}", + "Content-type": 'audio/wav; codec="audio/pcm"; samplerate=16000', "Transfer-Encoding": "chunked", - }) + }, + ) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError("recognition request failed: {}".format(e.reason)) + raise RequestError(f"recognition request failed: {e.reason}") except URLError as e: - raise RequestError("recognition connection failed: {}".format(e.reason)) + raise RequestError(f"recognition connection failed: {e.reason}") response_text = response.read().decode("utf-8") result = json.loads(response_text) - # return results if show_all: return result - if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "NBest" not in result: + if ( + "RecognitionStatus" not in result + or result["RecognitionStatus"] != "Success" + or "NBest" not in result + ): raise UnknownValueError() - return result['NBest'][0]["Display"], result['NBest'][0]["Confidence"] + return result["NBest"][0]["Display"], result["NBest"][0]["Confidence"] def recognize_bing(self, audio_data, key, language="en-US", show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Speech API. - - The Microsoft Bing Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ with Microsoft Azure. - - To get the API key, go to the `Microsoft Azure Portal Resources `__ page, go to "All Resources" > "Add" > "See All" > Search "Bing Speech API > "Create", and fill in the form to make a "Bing Speech API" resource. 
On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Bing Speech API keys are 32-character lowercase hexadecimal strings. - - The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation `__ under "Interactive and dictation mode". - - Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. - - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using the Microsoft Bing Speech API. + + The Microsoft Bing Speech API key is specified by ``key``. Unfortunately, + these are not available without `signing up for an account + `__ + with Microsoft Azure. + + To get the API key, go to the `Microsoft Azure Portal Resources + `__ page, go to "All Resources" > "Add" > "See All" + > Search "Bing Speech API > "Create", and fill in the form to make a + "Bing Speech API" resource. On the resulting page (which is also accessible + from the "All Resources" page in the Azure Portal), go to the + "Show Access Keys" page, which will have two API keys, either of which can be + used for the `key` parameter. Microsoft Bing Speech API keys are 32-character + lowercase hexadecimal strings. + + The recognition language is determined by ``language``, a BCP-47 language tag + like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting + to US English. A list of supported language values can be found in the `API + documentation `__ + under "Interactive and dictation mode". + + Returns the most likely transcription if ``show_all`` is false (the default). + Otherwise, returns the `raw API response `__ + as a JSON dictionary. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is + unintelligible. Raises a ``speech_recognition.RequestError`` exception if the + speech recognition operation failed, if the key isn't valid, or if there is no + internet connection. 
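Usage mirrors the Azure call but yields a plain transcript string (``BING_KEY`` is a placeholder):

    text = r.recognize_bing(audio, key=BING_KEY, language="en-US")
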
""" - assert isinstance(audio_data, AudioData), "Data must be audio data" - assert isinstance(key, str), "``key`` must be a string" - assert isinstance(language, str), "``language`` must be a string" + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert isinstance(key, str), KEY_MUST_BE_STRING + assert isinstance(language, str), LANGUAGE_MUST_BE_STRING - access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None) + access_token, expire_time = getattr( + self, "bing_cached_access_token", None + ), getattr(self, "bing_cached_access_token_expiry", None) allow_caching = True try: - from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+ + # we need monotonic time to avoid being affected by system clock changes, + # but this is only available in Python 3.3+ + from time import ( + monotonic, + ) except ImportError: - expire_time = None # monotonic time not available, don't cache access tokens - allow_caching = False # don't allow caching, since monotonic time isn't available - if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired + # monotonic time not available, don't cache access tokens + expire_time = None + # don't allow caching, since monotonic time isn't available + allow_caching = False + + # caching not enabled, first credential request, + # or the access token from the previous one expired + if expire_time is None or monotonic() > expire_time: # get an access token using OAuth credential_url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken" - credential_request = Request(credential_url, data=b"", headers={ - "Content-type": "application/x-www-form-urlencoded", - "Content-Length": "0", - "Ocp-Apim-Subscription-Key": key, - }) + credential_request = Request( + credential_url, + data=b"", + headers={ + "Content-type": "application/x-www-form-urlencoded", + "Content-Length": "0", + "Ocp-Apim-Subscription-Key": key, + }, + ) if allow_caching: start_time = monotonic() try: - credential_response = urlopen(credential_request, timeout=60) # credential response can take longer, use longer timeout instead of default one + # credential response can take longer, + # use longer timeout instead of default one + credential_response = urlopen(credential_request, timeout=60) except HTTPError as e: - raise RequestError("credential request failed: {}".format(e.reason)) + raise RequestError(f"credential request failed: {e.reason}") except URLError as e: - raise RequestError("credential connection failed: {}".format(e.reason)) + raise RequestError(f"credential connection failed: {e.reason}") access_token = credential_response.read().decode("utf-8") if allow_caching: # save the token for the duration it is valid for self.bing_cached_access_token = access_token - self.bing_cached_access_token_expiry = start_time + 600 # according to https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition, the token expires in exactly 10 minutes + # according to https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition, + # the token expires in exactly 10 minutes + self.bing_cached_access_token_expiry = start_time + 600 wav_data = audio_data.get_wav_data( convert_rate=16000, # audio samples must be 8kHz or 16 kHz - convert_width=2 # audio 
samples should be 16-bit + convert_width=2, # audio samples should be 16-bit ) - url = "https://speech.platform.bing.com/speech/recognition/interactive/cognitiveservices/v1?{}".format(urlencode({ - "language": language, - "locale": language, - "requestid": uuid.uuid4(), - })) + url = "https://speech.platform.bing.com/speech/recognition/interactive/cognitiveservices/v1?{}".format( + urlencode( + { + "language": language, + "locale": language, + "requestid": uuid.uuid4(), + } + ) + ) - if sys.version_info >= (3, 6): # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible - request = Request(url, data=io.BytesIO(wav_data), headers={ - "Authorization": "Bearer {}".format(access_token), - "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", - "Transfer-Encoding": "chunked", - }) - else: # fall back on manually formatting the POST body as a chunked request - ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8") - chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n" - request = Request(url, data=chunked_transfer_encoding_data, headers={ - "Authorization": "Bearer {}".format(access_token), - "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", + # chunked-transfer requests are only supported in the standard library + # as of Python 3.6+, use it if possible + request = Request( + url, + data=io.BytesIO(wav_data), + headers={ + "Authorization": f"Bearer {access_token}", + "Content-type": 'audio/wav; codec="audio/pcm"; samplerate=16000', "Transfer-Encoding": "chunked", - }) + }, + ) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError("recognition request failed: {}".format(e.reason)) + raise RequestError(f"recognition request failed: {e.reason}") except URLError as e: - raise RequestError("recognition connection failed: {}".format(e.reason)) + raise RequestError(f"recognition connection failed: {e.reason}") response_text = response.read().decode("utf-8") result = json.loads(response_text) - # return results - if show_all: return result - if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "DisplayText" not in result: raise UnknownValueError() + if show_all: + return result + if ( + "RecognitionStatus" not in result + or result["RecognitionStatus"] != "Success" + or "DisplayText" not in result + ): + raise UnknownValueError() return result["DisplayText"] - def recognize_lex(self, audio_data, bot_name, bot_alias, user_id, content_type="audio/l16; rate=16000; channels=1", access_key_id=None, secret_access_key=None, region=None): + def recognize_lex( + self, + audio_data, + bot_name, + bot_alias, + user_id, + content_type="audio/l16; rate=16000; channels=1", + access_key_id=None, + secret_access_key=None, + region=None, + ): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Amazon Lex API. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using the Amazon Lex API. 
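A sketch of calling ``recognize_lex`` with placeholder bot routing values; AWS credentials resolve through boto3's usual lookup chain when not passed explicitly:

    text = r.recognize_lex(audio, bot_name="OrderFlowers", bot_alias="prod", user_id="user-123")
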
- If access_key_id or secret_access_key is not set it will go through the list in the link below + If access_key_id or secret_access_key is not set it will go through the list in + the link below http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials """ - assert isinstance(audio_data, AudioData), "Data must be audio data" + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA assert isinstance(bot_name, str), "``bot_name`` must be a string" assert isinstance(bot_alias, str), "``bot_alias`` must be a string" assert isinstance(user_id, str), "``user_id`` must be a string" assert isinstance(content_type, str), "``content_type`` must be a string" - assert access_key_id is None or isinstance(access_key_id, str), "``access_key_id`` must be a string" - assert secret_access_key is None or isinstance(secret_access_key, str), "``secret_access_key`` must be a string" + assert access_key_id is None or isinstance( + access_key_id, str + ), "``access_key_id`` must be a string" + assert secret_access_key is None or isinstance( + secret_access_key, str + ), "``secret_access_key`` must be a string" assert region is None or isinstance(region, str), "``region`` must be a string" try: import boto3 except ImportError: - raise RequestError("missing boto3 module: ensure that boto3 is set up correctly.") - - client = boto3.client('lex-runtime', aws_access_key_id=access_key_id, - aws_secret_access_key=secret_access_key, - region_name=region) + raise RequestError( + "missing boto3 module: ensure that boto3 is set up correctly." + ) - raw_data = audio_data.get_raw_data( - convert_rate=16000, convert_width=2 + client = boto3.client( + "lex-runtime", + aws_access_key_id=access_key_id, + aws_secret_access_key=secret_access_key, + region_name=region, ) + raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) + accept = "text/plain; charset=utf-8" - response = client.post_content(botName=bot_name, botAlias=bot_alias, userId=user_id, contentType=content_type, accept=accept, inputStream=raw_data) + response = client.post_content( + botName=bot_name, + botAlias=bot_alias, + userId=user_id, + contentType=content_type, + accept=accept, + inputStream=raw_data, + ) return response["inputTranscript"] def recognize_houndify(self, audio_data, client_id, client_key, show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Houndify API. - - The Houndify client ID and client key are specified by ``client_id`` and ``client_key``, respectively. Unfortunately, these are not available without `signing up for an account `__. Once logged into the `dashboard `__, you will want to select "Register a new client", and fill in the form as necessary. When at the "Enable Domains" page, enable the "Speech To Text Only" domain, and then select "Save & Continue". - - To get the client ID and client key for a Houndify client, go to the `dashboard `__ and select the client's "View Details" link. On the resulting page, the client ID and client key will be visible. Client IDs and client keys are both Base64-encoded strings. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using the Houndify API. + + The Houndify client ID and client key are specified by ``client_id`` and + ``client_key``, respectively. Unfortunately, these are not available without + `signing up for an account `__. Once logged + into the `dashboard `__, you will want to + select "Register a new client", and fill in the form as necessary. 
When at the + "Enable Domains" page, enable the "Speech To Text Only" domain, and then select + "Save & Continue". + + To get the client ID and client key for a Houndify client, go to the `dashboard + `__ and select the client's "View Details" + link. On the resulting page, the client ID and client key will be visible. + Client IDs and client keys are both Base64-encoded strings. Currently, only English is supported as a recognition language. - Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary. + Returns the most likely transcription if ``show_all`` is false (the default). + Otherwise, returns the raw API response as a JSON dictionary. - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is + unintelligible. Raises a ``speech_recognition.RequestError`` exception if the + speech recognition operation failed, if the key isn't valid, or if there is no + internet connection. """ - assert isinstance(audio_data, AudioData), "Data must be audio data" + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA assert isinstance(client_id, str), "``client_id`` must be a string" assert isinstance(client_key, str), "``client_key`` must be a string" wav_data = audio_data.get_wav_data( - convert_rate=None if audio_data.sample_rate in [8000, 16000] else 16000, # audio samples must be 8 kHz or 16 kHz - convert_width=2 # audio samples should be 16-bit + convert_rate=None + if audio_data.sample_rate in [8000, 16000] + else 16000, # audio samples must be 8 kHz or 16 kHz + convert_width=2, # audio samples should be 16-bit ) url = "https://api.houndify.com/v1/audio" user_id, request_id = str(uuid.uuid4()), str(uuid.uuid4()) @@ -1111,105 +1799,142 @@ def recognize_houndify(self, audio_data, client_id, client_key, show_all=False): request_signature = base64.urlsafe_b64encode( hmac.new( base64.urlsafe_b64decode(client_key), - user_id.encode("utf-8") + b";" + request_id.encode("utf-8") + request_time.encode("utf-8"), - hashlib.sha256 + user_id.encode("utf-8") + + b";" + + request_id.encode("utf-8") + + request_time.encode("utf-8"), + hashlib.sha256, ).digest() # get the HMAC digest as bytes ).decode("utf-8") - request = Request(url, data=wav_data, headers={ - "Content-Type": "application/json", - "Hound-Request-Info": json.dumps({"ClientID": client_id, "UserID": user_id}), - "Hound-Request-Authentication": "{};{}".format(user_id, request_id), - "Hound-Client-Authentication": "{};{};{}".format(client_id, request_time, request_signature) - }) + request = Request( + url, + data=wav_data, + headers={ + "Content-Type": "application/json", + "Hound-Request-Info": json.dumps( + {"ClientID": client_id, "UserID": user_id} + ), + "Hound-Request-Authentication": f"{user_id};{request_id}", + "Hound-Client-Authentication": "{};{};{}".format( + client_id, request_time, request_signature + ), + }, + ) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError("recognition request failed: {}".format(e.reason)) + raise RequestError(f"recognition request failed: {e.reason}") except URLError as e: - raise RequestError("recognition connection failed: {}".format(e.reason)) + raise RequestError(f"recognition 
connection failed: {e.reason}") response_text = response.read().decode("utf-8") result = json.loads(response_text) - # return results - if show_all: return result + if show_all: + return result if "Disambiguation" not in result or result["Disambiguation"] is None: raise UnknownValueError() - return result['Disambiguation']['ChoiceData'][0]['Transcription'], result['Disambiguation']['ChoiceData'][0]['ConfidenceScore'] + return ( + result["Disambiguation"]["ChoiceData"][0]["Transcription"], + result["Disambiguation"]["ChoiceData"][0]["ConfidenceScore"], + ) - def recognize_amazon(self, audio_data, bucket_name=None, access_key_id=None, secret_access_key=None, region=None, job_name=None, file_key=None): + def recognize_amazon( + self, + audio_data, + bucket_name=None, + access_key_id=None, + secret_access_key=None, + region=None, + job_name=None, + file_key=None, + ): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance) using Amazon Transcribe. - https://aws.amazon.com/transcribe/ - If access_key_id or secret_access_key is not set it will go through the list in the link below - http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance) using + Amazon Transcribe. https://aws.amazon.com/transcribe/ + If access_key_id or secret_access_key is not set it will go through the list in + the link: http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials """ - assert access_key_id is None or isinstance(access_key_id, str), "``access_key_id`` must be a string" - assert secret_access_key is None or isinstance(secret_access_key, str), "``secret_access_key`` must be a string" + assert access_key_id is None or isinstance( + access_key_id, str + ), "``access_key_id`` must be a string" + assert secret_access_key is None or isinstance( + secret_access_key, str + ), "``secret_access_key`` must be a string" assert region is None or isinstance(region, str), "``region`` must be a string" + import multiprocessing import traceback import uuid - import multiprocessing + from botocore.exceptions import ClientError + proc = multiprocessing.current_process() check_existing = audio_data is None and job_name - bucket_name = bucket_name or ('%s-%s' % (str(uuid.uuid4()), proc.pid)) - job_name = job_name or ('%s-%s' % (str(uuid.uuid4()), proc.pid)) + bucket_name = bucket_name or (f"{str(uuid.uuid4())}-{proc.pid}") + job_name = job_name or (f"{str(uuid.uuid4())}-{proc.pid}") try: import boto3 except ImportError: - raise RequestError("missing boto3 module: ensure that boto3 is set up correctly.") + raise RequestError( + "missing boto3 module: ensure that boto3 is set up correctly." + ) transcribe = boto3.client( - 'transcribe', + "transcribe", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, - region_name=region) + region_name=region, + ) - s3 = boto3.client('s3', + s3 = boto3.client( + "s3", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, - region_name=region) + region_name=region, + ) session = boto3.Session( aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, - region_name=region + region_name=region, ) # Upload audio data to S3. - filename = '%s.wav' % job_name + filename = "%s.wav" % job_name try: # Bucket creation fails surprisingly often, even if the bucket exists. # print('Attempting to create bucket %s...' 
% bucket_name) s3.create_bucket(Bucket=bucket_name) except ClientError as exc: - print('Error creating bucket %s: %s' % (bucket_name, exc)) - s3res = session.resource('s3') - bucket = s3res.Bucket(bucket_name) + print(f"Error creating bucket {bucket_name}: {exc}") + s3res = session.resource("s3") + s3res.Bucket(bucket_name) if audio_data is not None: - print('Uploading audio data...') + print("Uploading audio data...") wav_data = audio_data.get_wav_data() s3.put_object(Bucket=bucket_name, Key=filename, Body=wav_data) object_acl = s3res.ObjectAcl(bucket_name, filename) - object_acl.put(ACL='public-read') + object_acl.put(ACL="public-read") else: - print('Skipping audio upload.') - job_uri = 'https://%s.s3.amazonaws.com/%s' % (bucket_name, filename) + print("Skipping audio upload.") + job_uri = f"https://{bucket_name}.s3.amazonaws.com/{filename}" if check_existing: - # Wait for job to complete. try: status = transcribe.get_transcription_job(TranscriptionJobName=job_name) except ClientError as exc: - print('!'*80) - print('Error getting job:', exc.response) - if exc.response['Error']['Code'] == 'BadRequestException' and "The requested job couldn't be found" in str(exc): + print("!" * 80) + print("Error getting job:", exc.response) + if exc.response["Error"]["Code"] == ("BadRequestException") and ( + "The requested job couldn't be found" + ) in str(exc): # Some error caused the job we recorded to not exist on AWS. - # Likely we were interrupted right after retrieving and deleting the job but before recording the transcript. + # Likely we were interrupted right after retrieving and deleting + # the job but before recording the transcript. + # Reset and try again later. exc = TranscriptionNotReady() exc.job_name = None @@ -1218,81 +1943,88 @@ def recognize_amazon(self, audio_data, bucket_name=None, access_key_id=None, sec else: # Some other error happened, so re-raise. raise - - job = status['TranscriptionJob'] - if job['TranscriptionJobStatus'] in ['COMPLETED'] and 'TranscriptFileUri' in job['Transcript']: + job = status["TranscriptionJob"] + if ( + job["TranscriptionJobStatus"] in ["COMPLETED"] + and "TranscriptFileUri" in job["Transcript"] + ): # Retrieve transcription JSON containing transcript. - transcript_uri = job['Transcript']['TranscriptFileUri'] - import urllib.request, json + transcript_uri = job["Transcript"]["TranscriptFileUri"] + import json + import urllib.request + with urllib.request.urlopen(transcript_uri) as json_data: d = json.load(json_data) confidences = [] - for item in d['results']['items']: - confidences.append(float(item['alternatives'][0]['confidence'])) + for item in d["results"]["items"]: + confidences.append(float(item["alternatives"][0]["confidence"])) confidence = 0.5 if confidences: - confidence = sum(confidences)/float(len(confidences)) - transcript = d['results']['transcripts'][0]['transcript'] + confidence = sum(confidences) / float(len(confidences)) + transcript = d["results"]["transcripts"][0]["transcript"] # Delete job. try: - transcribe.delete_transcription_job(TranscriptionJobName=job_name) # cleanup + # cleanup + transcribe.delete_transcription_job( + TranscriptionJobName=job_name + ) except Exception as exc: - print('Warning, could not clean up transcription: %s' % exc) + print("Warning, could not clean up transcription: %s" % exc) traceback.print_exc() # Delete S3 file. 
s3.delete_object(Bucket=bucket_name, Key=filename) return transcript, confidence - elif job['TranscriptionJobStatus'] in ['FAILED']: - + elif job["TranscriptionJobStatus"] in ["FAILED"]: # Delete job. try: - transcribe.delete_transcription_job(TranscriptionJobName=job_name) # cleanup + # cleanup + transcribe.delete_transcription_job(TranscriptionJobName=job_name) except Exception as exc: - print('Warning, could not clean up transcription: %s' % exc) + print("Warning, could not clean up transcription: %s" % exc) traceback.print_exc() # Delete S3 file. s3.delete_object(Bucket=bucket_name, Key=filename) - + exc = TranscriptionFailed() exc.job_name = None exc.file_key = None raise exc else: # Keep waiting. - print('Keep waiting.') + print("Keep waiting.") exc = TranscriptionNotReady() exc.job_name = job_name exc.file_key = None raise exc else: - # Launch the transcription job. # try: - # transcribe.delete_transcription_job(TranscriptionJobName=job_name) # pre-cleanup + # # pre-cleanup + # transcribe.delete_transcription_job(TranscriptionJobName=job_name) # except: - # # It's ok if this fails because the job hopefully doesn't exist yet. - # pass + # # It's ok if this fails because the job hopefully doesn't exist yet. + # pass try: transcribe.start_transcription_job( TranscriptionJobName=job_name, - Media={'MediaFileUri': job_uri}, - MediaFormat='wav', - LanguageCode='en-US' + Media={"MediaFileUri": job_uri}, + MediaFormat="wav", + LanguageCode="en-US", ) exc = TranscriptionNotReady() exc.job_name = job_name exc.file_key = None raise exc except ClientError as exc: - print('!'*80) - print('Error starting job:', exc.response) - if exc.response['Error']['Code'] == 'LimitExceededException': + print("!" * 80) + print("Error starting job:", exc.response) + if exc.response["Error"]["Code"] == "LimitExceededException": # Could not start job. Cancel everything. s3.delete_object(Bucket=bucket_name, Key=filename) exc = TranscriptionNotReady() @@ -1310,7 +2042,7 @@ def recognize_assemblyai(self, audio_data, api_token, job_name=None, **kwargs): """ def read_file(filename, chunk_size=5242880): - with open(filename, 'rb') as _file: + with open(filename, "rb") as _file: while True: data = _file.read(chunk_size) if not data: @@ -1327,46 +2059,43 @@ def read_file(filename, chunk_size=5242880): } response = requests.get(endpoint, headers=headers) data = response.json() - status = data['status'] + status = data["status"] - if status == 'error': + if status == "error": # Handle error. exc = TranscriptionFailed() exc.job_name = None exc.file_key = None raise exc # Handle success. - elif status == 'completed': - confidence = data['confidence'] - text = data['text'] + elif status == "completed": + confidence = data["confidence"] + text = data["text"] return text, confidence # Otherwise keep waiting. - print('Keep waiting.') + print("Keep waiting.") exc = TranscriptionNotReady() exc.job_name = job_name exc.file_key = None raise exc else: # Upload file. - headers = {'authorization': api_token} - response = requests.post('https://api.assemblyai.com/v2/upload', - headers=headers, - data=read_file(audio_data)) - upload_url = response.json()['upload_url'] + headers = {"authorization": api_token} + response = requests.post( + "https://api.assemblyai.com/v2/upload", + headers=headers, + data=read_file(audio_data), + ) + upload_url = response.json()["upload_url"] # Queue file for transcription. 
endpoint = "https://api.assemblyai.com/v2/transcript" - json = { - "audio_url": upload_url - } - headers = { - "authorization": api_token, - "content-type": "application/json" - } + json = {"audio_url": upload_url} + headers = {"authorization": api_token, "content-type": "application/json"} response = requests.post(endpoint, json=json, headers=headers) data = response.json() - transciption_id = data['id'] + transciption_id = data["id"] exc = TranscriptionNotReady() exc.job_name = transciption_id exc.file_key = None @@ -1374,51 +2103,83 @@ def read_file(filename, chunk_size=5242880): def recognize_ibm(self, audio_data, key, language="en-US", show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API. - - The IBM Speech to Text username and password are specified by ``username`` and ``password``, respectively. Unfortunately, these are not available without `signing up for an account `__. Once logged into the Bluemix console, follow the instructions for `creating an IBM Watson service instance `__, where the Watson service is "Speech To Text". IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are mixed-case alphanumeric strings. - - The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation `__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value. - - Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. - - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using the IBM Speech to Text API. + + The IBM Speech to Text username and password are specified by ``username`` and + ``password``, respectively. Unfortunately, these are not available without + `signing up for an account `__. + Once logged into the Bluemix console, follow the instructions for `creating an + IBM Watson service instance `__, + where the Watson service is "Speech To Text". IBM Speech to Text usernames are + strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are + mixed-case alphanumeric strings. + + The recognition language is determined by ``language``, an RFC5646 language tag + with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), + defaulting to US English. The supported language values are listed under the + ``model`` parameter of the `audio recognition API documentation `__, + in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is + the language value. + + Returns the most likely transcription if ``show_all`` is false (the default). + Otherwise, returns the `raw API response `__ + as a JSON dictionary. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is + unintelligible. Raises a ``speech_recognition.RequestError`` exception if the + speech recognition operation failed, if the key isn't valid, or if there is no + internet connection. 
""" - assert isinstance(audio_data, AudioData), "Data must be audio data" - assert isinstance(key, str), "``key`` must be a string" + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert isinstance(key, str), KEY_MUST_BE_STRING flac_data = audio_data.get_flac_data( - convert_rate=None if audio_data.sample_rate >= 16000 else 16000, # audio samples should be at least 16 kHz - convert_width=None if audio_data.sample_width >= 2 else 2 # audio samples should be at least 16-bit + convert_rate=None + if audio_data.sample_rate >= 16000 + else 16000, # audio samples should be at least 16 kHz + convert_width=None + if audio_data.sample_width >= 2 + else 2, # audio samples should be at least 16-bit ) url = "https://gateway-wdc.watsonplatform.net/speech-to-text/api/v1/recognize" - request = Request(url, data=flac_data, headers={ - "Content-Type": "audio/x-flac", - }) - request.get_method = lambda: 'POST' - username = 'apikey' + request = Request( + url, + data=flac_data, + headers={ + "Content-Type": "audio/x-flac", + }, + ) + request.get_method = lambda: "POST" + username = "apikey" password = key - authorization_value = base64.standard_b64encode("{}:{}".format(username, password).encode("utf-8")).decode("utf-8") - request.add_header("Authorization", "Basic {}".format(authorization_value)) + authorization_value = base64.standard_b64encode( + f"{username}:{password}".encode() + ).decode("utf-8") + request.add_header("Authorization", f"Basic {authorization_value}") try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError("recognition request failed: {}".format(e.reason)) + raise RequestError(f"recognition request failed: {e.reason}") except URLError as e: - raise RequestError("recognition connection failed: {}".format(e.reason)) + raise RequestError(f"recognition connection failed: {e.reason}") response_text = response.read().decode("utf-8") result = json.loads(response_text) - # return results if show_all: return result - if "results" not in result or len(result["results"]) < 1 or "alternatives" not in result["results"][0]: + if ( + "results" not in result + or len(result["results"]) < 1 + or "alternatives" not in result["results"][0] + ): raise UnknownValueError() transcription = [] confidence = None for utterance in result["results"]: - if "alternatives" not in utterance: raise UnknownValueError() + if "alternatives" not in utterance: + raise UnknownValueError() for hypothesis in utterance["alternatives"]: if "transcript" in hypothesis: transcription.append(hypothesis["transcript"]) @@ -1426,10 +2187,15 @@ def recognize_ibm(self, audio_data, key, language="en-US", show_all=False): break return "\n".join(transcription), confidence - lasttfgraph = '' + lasttfgraph = "" tflabels = None - def recognize_tensorflow(self, audio_data, tensor_graph='tensorflow-data/conv_actions_frozen.pb', tensor_label='tensorflow-data/conv_actions_labels.txt'): + def recognize_tensorflow( + self, + audio_data, + tensor_graph="tensorflow-data/conv_actions_frozen.pb", + tensor_label="tensorflow-data/conv_actions_labels.txt", + ): """ Performs speech recognition on ``audio_data`` (an ``AudioData`` instance). @@ -1437,35 +2203,35 @@ def recognize_tensorflow(self, audio_data, tensor_graph='tensorflow-data/conv_ac Path to Tensor Labels file loaded from ``tensor_label``. 
""" - assert isinstance(audio_data, AudioData), "Data must be audio data" + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA assert isinstance(tensor_graph, str), "``tensor_graph`` must be a string" assert isinstance(tensor_label, str), "``tensor_label`` must be a string" try: import tensorflow as tf except ImportError: - raise RequestError("missing tensorflow module: ensure that tensorflow is set up correctly.") + raise RequestError( + "missing tensorflow module: ensure that tensorflow is set up correctly." + ) if not (tensor_graph == self.lasttfgraph): self.lasttfgraph = tensor_graph # load graph - with tf.gfile.FastGFile(tensor_graph, 'rb') as f: + with tf.gfile.FastGFile(tensor_graph, "rb") as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) - tf.import_graph_def(graph_def, name='') + tf.import_graph_def(graph_def, name="") # load labels self.tflabels = [line.rstrip() for line in tf.gfile.GFile(tensor_label)] - wav_data = audio_data.get_wav_data( - convert_rate=16000, convert_width=2 - ) + wav_data = audio_data.get_wav_data(convert_rate=16000, convert_width=2) with tf.Session() as sess: - input_layer_name = 'wav_data:0' - output_layer_name = 'labels_softmax:0' + input_layer_name = "wav_data:0" + output_layer_name = "labels_softmax:0" softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name) - predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data}) + (predictions,) = sess.run(softmax_tensor, {input_layer_name: wav_data}) # Sort labels in order of confidence top_k = predictions.argsort()[-1:][::-1] @@ -1473,28 +2239,46 @@ def recognize_tensorflow(self, audio_data, tensor_graph='tensorflow-data/conv_ac human_string = self.tflabels[node_id] return human_string - def recognize_whisper(self, audio_data, model="base", show_dict=False, load_options=None, language=None, translate=False, **transcribe_options): + def recognize_whisper( + self, + audio_data, + model="base", + show_dict=False, + load_options=None, + language=None, + translate=False, + **transcribe_options, + ): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using Whisper. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using Whisper. - The recognition language is determined by ``language``, an uncapitalized full language name like "english" or "chinese". See the full language list at https://github.com/openai/whisper/blob/main/whisper/tokenizer.py + The recognition language is determined by ``language``, an uncapitalized full + language name like "english" or "chinese". See the full language list at https://github.com/openai/whisper/blob/main/whisper/tokenizer.py - model can be any of tiny, base, small, medium, large, tiny.en, base.en, small.en, medium.en. See https://github.com/openai/whisper for more details. + model can be any of tiny, base, small, medium, large, tiny.en, base.en, + small.en, medium.en. See https://github.com/openai/whisper for more details. - If show_dict is true, returns the full dict response from Whisper, including the detected language. Otherwise returns only the transcription. + If show_dict is true, returns the full dict response from Whisper, including + the detected language. Otherwise returns only the transcription. You can translate the result to english with Whisper by passing translate=True - Other values are passed directly to whisper. See https://github.com/openai/whisper/blob/main/whisper/transcribe.py for all options + Other values are passed directly to whisper. 
See https://github.com/openai/whisper/blob/main/whisper/transcribe.py + for all options """ - assert isinstance(audio_data, AudioData), "Data must be audio data" + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA import numpy as np import soundfile as sf import torch import whisper - if load_options or not hasattr(self, "whisper_model") or self.whisper_model.get(model) is None: + if ( + load_options + or not hasattr(self, "whisper_model") + or self.whisper_model.get(model) is None + ): self.whisper_model = getattr(self, "whisper_model", {}) self.whisper_model[model] = whisper.load_model(model, **load_options or {}) @@ -1509,7 +2293,7 @@ def recognize_whisper(self, audio_data, model="base", show_dict=False, load_opti language=language, task="translate" if translate else None, fp16=torch.cuda.is_available(), - **transcribe_options + **transcribe_options, ) if show_dict: @@ -1518,28 +2302,34 @@ def recognize_whisper(self, audio_data, model="base", show_dict=False, load_opti return result["text"] recognize_whisper_api = whisper.recognize_whisper_api - - def recognize_vosk(self, audio_data, language='en'): - from vosk import Model, KaldiRecognizer - - assert isinstance(audio_data, AudioData), "Data must be audio data" - - if not hasattr(self, 'vosk_model'): + + def recognize_vosk(self, audio_data, language="en"): + from vosk import KaldiRecognizer, Model + + assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + + if not hasattr(self, "vosk_model"): if not os.path.exists("model"): - return "Please download the model from https://github.com/alphacep/vosk-api/blob/master/doc/models.md and unpack as 'model' in the current folder." - exit (1) + return ( + "Please download the model from " + "https://github.com/alphacep/vosk-api/blob/master/doc/models.md " + "and unpack as 'model' in the current folder." 
+ ) self.vosk_model = Model("model") - rec = KaldiRecognizer(self.vosk_model, 16000); - - rec.AcceptWaveform(audio_data.get_raw_data(convert_rate=16000, convert_width=2)); + rec = KaldiRecognizer(self.vosk_model, 16000) + + rec.AcceptWaveform(audio_data.get_raw_data(convert_rate=16000, convert_width=2)) finalRecognition = rec.FinalResult() - + return finalRecognition -class PortableNamedTemporaryFile(object): - """Limited replacement for ``tempfile.NamedTemporaryFile``, except unlike ``tempfile.NamedTemporaryFile``, the file can be opened again while it's currently open, even on Windows.""" +class PortableNamedTemporaryFile: + """Limited replacement for ``tempfile.NamedTemporaryFile``, except unlike + ``tempfile.NamedTemporaryFile``, the file can be opened again while it's currently + open, even on Windows.""" + def __init__(self, mode="w+b"): self.mode = mode @@ -1573,24 +2363,75 @@ def flush(self, *args, **kwargs): WavFile = AudioFile # WavFile was renamed to AudioFile in 3.4.1 -def recognize_api(self, audio_data, client_access_token, language="en", session_id=None, show_all=False): +def recognize_api( + self, + audio_data, + client_access_token, + language="en", + session_id=None, + show_all=False, +): wav_data = audio_data.get_wav_data(convert_rate=16000, convert_width=2) url = "https://api.api.ai/v1/query" while True: boundary = uuid.uuid4().hex - if boundary.encode("utf-8") not in wav_data: break - if session_id is None: session_id = uuid.uuid4().hex - data = b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"request\"\r\n" + b"Content-Type: application/json\r\n" + b"\r\n" + b"{\"v\": \"20150910\", \"sessionId\": \"" + session_id.encode("utf-8") + b"\", \"lang\": \"" + language.encode("utf-8") + b"\"}\r\n" + b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"voiceData\"; filename=\"audio.wav\"\r\n" + b"Content-Type: audio/wav\r\n" + b"\r\n" + wav_data + b"\r\n" + b"--" + boundary.encode("utf-8") + b"--\r\n" - request = Request(url, data=data, headers={"Authorization": "Bearer {}".format(client_access_token), "Content-Length": str(len(data)), "Expect": "100-continue", "Content-Type": "multipart/form-data; boundary={}".format(boundary)}) - try: response = urlopen(request, timeout=10) - except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason)) - except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason)) + if boundary.encode("utf-8") not in wav_data: + break + if session_id is None: + session_id = uuid.uuid4().hex + data = ( + b"--" + + boundary.encode("utf-8") + + b"\r\n" + + b'Content-Disposition: form-data; name="request"\r\n' + + b"Content-Type: application/json\r\n" + + b"\r\n" + + b'{"v": "20150910", "sessionId": "' + + session_id.encode("utf-8") + + b'", "lang": "' + + language.encode("utf-8") + + b'"}\r\n' + + b"--" + + boundary.encode("utf-8") + + b"\r\n" + + b'Content-Disposition: form-data; name="voiceData"; filename="audio.wav"\r\n' + + b"Content-Type: audio/wav\r\n" + + b"\r\n" + + wav_data + + b"\r\n" + + b"--" + + boundary.encode("utf-8") + + b"--\r\n" + ) + request = Request( + url, + data=data, + headers={ + "Authorization": f"Bearer {client_access_token}", + "Content-Length": str(len(data)), + "Expect": "100-continue", + "Content-Type": f"multipart/form-data; boundary={boundary}", + }, + ) + try: + response = urlopen(request, timeout=10) + except HTTPError as e: + raise RequestError(f"recognition request failed: {e.reason}") + 
except URLError as e: + raise RequestError(f"recognition connection failed: {e.reason}") response_text = response.read().decode("utf-8") result = json.loads(response_text) - if show_all: return result - if "status" not in result or "errorType" not in result["status"] or result["status"]["errorType"] != "success": + if show_all: + return result + if ( + "status" not in result + or "errorType" not in result["status"] + or result["status"]["errorType"] != "success" + ): raise UnknownValueError() return result["result"]["resolvedQuery"] -Recognizer.recognize_api = classmethod(recognize_api) # API.AI Speech Recognition is deprecated/not recommended as of 3.5.0, and currently is only optionally available for paid plans +# API.AI Speech Recognition is deprecated/not recommended as of 3.5.0, +# and currently is only optionally available for paid plans +Recognizer.recognize_api = classmethod(recognize_api) diff --git a/custom_speech_recognition/__main__.py b/custom_speech_recognition/__main__.py index 68f5652..c05a1fc 100644 --- a/custom_speech_recognition/__main__.py +++ b/custom_speech_recognition/__main__.py @@ -5,20 +5,27 @@ try: print("A moment of silence, please...") - with m as source: r.adjust_for_ambient_noise(source) - print("Set minimum energy threshold to {}".format(r.energy_threshold)) + with m as source: + r.adjust_for_ambient_noise(source) + print(f"Set minimum energy threshold to {r.energy_threshold}") while True: print("Say something!") - with m as source: audio = r.listen(source) + with m as source: + audio = r.listen(source) print("Got it! Now to recognize it...") try: # recognize speech using Google Speech Recognition value = r.recognize_google(audio) - print("You said {}".format(value)) + print(f"You said {value}") except sr.UnknownValueError: print("Oops! Didn't catch that") except sr.RequestError as e: - print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e)) + print( + ( + "Uh oh! Couldn't request results from " + "Google Speech Recognition service; {}" + ).format(e) + ) except KeyboardInterrupt: pass diff --git a/custom_speech_recognition/audio.py b/custom_speech_recognition/audio.py index 0bff55e..b5d462b 100644 --- a/custom_speech_recognition/audio.py +++ b/custom_speech_recognition/audio.py @@ -9,17 +9,24 @@ import wave -class AudioData(object): +class AudioData: """ Creates a new ``AudioData`` instance, which represents mono audio data. - The raw audio data is specified by ``frame_data``, which is a sequence of bytes representing audio samples. This is the frame data structure used by the PCM WAV format. + The raw audio data is specified by ``frame_data``, which is a sequence of bytes + representing audio samples. This is the frame data structure used by the PCM WAV + format. - The width of each sample, in bytes, is specified by ``sample_width``. Each group of ``sample_width`` bytes represents a single audio sample. + The width of each sample, in bytes, is specified by ``sample_width``. Each group of + ``sample_width`` bytes represents a single audio sample. - The audio data is assumed to have a sample rate of ``sample_rate`` samples per second (Hertz). + The audio data is assumed to have a sample rate of ``sample_rate`` samples per + second (Hertz). - Usually, instances of this class are obtained from ``recognizer_instance.record`` or ``recognizer_instance.listen``, or in the callback for ``recognizer_instance.listen_in_background``, rather than instantiating them directly. 
+ Usually, instances of this class are obtained from ``recognizer_instance.record`` + or ``recognizer_instance.listen``, or in the callback for + ``recognizer_instance.listen_in_background``, rather than instantiating them + directly. """ def __init__(self, frame_data, sample_rate, sample_width): @@ -33,9 +40,12 @@ def __init__(self, frame_data, sample_rate, sample_width): def get_segment(self, start_ms=None, end_ms=None): """ - Returns a new ``AudioData`` instance, trimmed to a given time interval. In other words, an ``AudioData`` instance with the same audio data except starting at ``start_ms`` milliseconds in and ending ``end_ms`` milliseconds in. + Returns a new ``AudioData`` instance, trimmed to a given time interval. + In other words, an ``AudioData`` instance with the same audio data except + starting at ``start_ms`` milliseconds in and ending ``end_ms`` milliseconds in. - If not specified, ``start_ms`` defaults to the beginning of the audio, and ``end_ms`` defaults to the end. + If not specified, ``start_ms`` defaults to the beginning of the audio, and + ``end_ms`` defaults to the end. """ assert ( start_ms is None or start_ms >= 0 @@ -46,15 +56,11 @@ def get_segment(self, start_ms=None, end_ms=None): if start_ms is None: start_byte = 0 else: - start_byte = int( - (start_ms * self.sample_rate * self.sample_width) // 1000 - ) + start_byte = int((start_ms * self.sample_rate * self.sample_width) // 1000) if end_ms is None: end_byte = len(self.frame_data) else: - end_byte = int( - (end_ms * self.sample_rate * self.sample_width) // 1000 - ) + end_byte = int((end_ms * self.sample_rate * self.sample_width) // 1000) return AudioData( self.frame_data[start_byte:end_byte], self.sample_rate, @@ -63,13 +69,17 @@ def get_segment(self, start_ms=None, end_ms=None): def get_raw_data(self, convert_rate=None, convert_width=None): """ - Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance. + Returns a byte string representing the raw frame data for the audio represented + by the ``AudioData`` instance. - If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. + If ``convert_rate`` is specified and the audio sample rate is not + ``convert_rate`` Hz, the resulting audio is resampled to match. - If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. + If ``convert_width`` is specified and the audio samples are not + ``convert_width`` bytes each, the resulting audio is converted to match. - Writing these bytes directly to a file results in a valid `RAW/PCM audio file `__. + Writing these bytes directly to a file results in a valid `RAW/PCM audio file + `__. 
""" assert ( convert_rate is None or convert_rate > 0 @@ -80,11 +90,11 @@ def get_raw_data(self, convert_rate=None, convert_width=None): raw_data = self.frame_data - # make sure unsigned 8-bit audio (which uses unsigned samples) is handled like higher sample width audio (which uses signed samples) + # make sure unsigned 8-bit audio (which uses unsigned samples) is handled like + # higher sample width audio (which uses signed samples) if self.sample_width == 1: - raw_data = audioop.bias( - raw_data, 1, -128 - ) # subtract 128 from every sample to make them act like signed samples + # subtract 128 from every sample to make them act like signed samples + raw_data = audioop.bias(raw_data, 1, -128) # resample audio at the desired rate if specified if convert_rate is not None and self.sample_rate != convert_rate: @@ -99,62 +109,64 @@ def get_raw_data(self, convert_rate=None, convert_width=None): # convert samples to desired sample width if specified if convert_width is not None and self.sample_width != convert_width: - if ( - convert_width == 3 - ): # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866) - raw_data = audioop.lin2lin( - raw_data, self.sample_width, 4 - ) # convert audio into 32-bit first, which is always supported + # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866) + if convert_width == 3: + # convert audio into 32-bit first, which is always supported + raw_data = audioop.lin2lin(raw_data, self.sample_width, 4) + try: - audioop.bias( - b"", 3, 0 - ) # test whether 24-bit audio is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do) - except ( - audioop.error - ): # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less) + # test whether 24-bit audio is supported (for example, ``audioop`` + # in Python 3.3 and below don't support sample width 3, + # while Python 3.4+ does) + audioop.bias(b"", 3, 0) + + # this version of audioop doesn't support 24-bit audio (probably + # Python 3.3 or less) + except audioop.error: + # since we're in little endian, we discard the first byte from each + # 32-bit sample to get a 24-bit sample raw_data = b"".join( - raw_data[i + 1 : i + 4] - for i in range(0, len(raw_data), 4) - ) # since we're in little endian, we discard the first byte from each 32-bit sample to get a 24-bit sample + raw_data[i + 1 : i + 4] for i in range(0, len(raw_data), 4) + ) else: # 24-bit audio fully supported, we don't need to shim anything raw_data = audioop.lin2lin( raw_data, self.sample_width, convert_width ) else: - raw_data = audioop.lin2lin( - raw_data, self.sample_width, convert_width - ) + raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width) - # if the output is 8-bit audio with unsigned samples, convert the samples we've been treating as signed to unsigned again + # if the output is 8-bit audio with unsigned samples, convert the samples we've + # been treating as signed to unsigned again if convert_width == 1: - raw_data = audioop.bias( - raw_data, 1, 128 - ) # add 128 to every sample to make them act like unsigned samples again + # add 128 to every sample to make them act like unsigned samples again + raw_data = audioop.bias(raw_data, 1, 128) return raw_data - def get_wav_data(self, convert_rate=None, convert_width=None, nchannels = 1): + def get_wav_data(self, convert_rate=None, convert_width=None, nchannels=1): """ - Returns a byte string representing the contents of a WAV file 
containing the audio represented by the ``AudioData`` instance. + Returns a byte string representing the contents of a WAV file containing the + audio represented by the ``AudioData`` instance. - If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. + If ``convert_width`` is specified and the audio samples are not + ``convert_width`` bytes each, the resulting audio is converted to match. - If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. + If ``convert_rate`` is specified and the audio sample rate is not + ``convert_rate`` Hz, the resulting audio is resampled to match. Writing these bytes directly to a file results in a valid `WAV file `__. """ raw_data = self.get_raw_data(convert_rate, convert_width) - sample_rate = ( - self.sample_rate if convert_rate is None else convert_rate - ) - sample_width = ( - self.sample_width if convert_width is None else convert_width - ) + sample_rate = self.sample_rate if convert_rate is None else convert_rate + sample_width = self.sample_width if convert_width is None else convert_width # generate the WAV file contents with io.BytesIO() as wav_file: wav_writer = wave.open(wav_file, "wb") - try: # note that we can't use context manager, since that was only added in Python 3.4 + + # note that we can't use context manager, + # since that was only added in Python 3.4 + try: wav_writer.setframerate(sample_rate) wav_writer.setsampwidth(sample_width) wav_writer.setnchannels(nchannels) @@ -166,28 +178,30 @@ def get_wav_data(self, convert_rate=None, convert_width=None, nchannels = 1): def get_aiff_data(self, convert_rate=None, convert_width=None): """ - Returns a byte string representing the contents of an AIFF-C file containing the audio represented by the ``AudioData`` instance. + Returns a byte string representing the contents of an AIFF-C file containing + the audio represented by the ``AudioData`` instance. - If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. + If ``convert_width`` is specified and the audio samples are not + ``convert_width`` bytes each, the resulting audio is converted to match. - If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. + If ``convert_rate`` is specified and the audio sample rate is not + ``convert_rate`` Hz, the resulting audio is resampled to match. Writing these bytes directly to a file results in a valid `AIFF-C file `__. 
""" raw_data = self.get_raw_data(convert_rate, convert_width) - sample_rate = ( - self.sample_rate if convert_rate is None else convert_rate - ) - sample_width = ( - self.sample_width if convert_width is None else convert_width - ) + sample_rate = self.sample_rate if convert_rate is None else convert_rate + sample_width = self.sample_width if convert_width is None else convert_width - # the AIFF format is big-endian, so we need to convert the little-endian raw data to big-endian - if hasattr( - audioop, "byteswap" - ): # ``audioop.byteswap`` was only added in Python 3.4 + # the AIFF format is big-endian, so we need to convert the little-endian + # raw data to big-endian + + # ``audioop.byteswap`` was only added in Python 3.4 + if hasattr(audioop, "byteswap"): raw_data = audioop.byteswap(raw_data, sample_width) - else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback + else: + # manually reverse the bytes of each sample, + # which is slower but works well enough as a fallback raw_data = raw_data[sample_width - 1 :: -1] + b"".join( raw_data[i + sample_width : i : -1] for i in range(sample_width - 1, len(raw_data), sample_width) @@ -196,7 +210,9 @@ def get_aiff_data(self, convert_rate=None, convert_width=None): # generate the AIFF-C file contents with io.BytesIO() as aiff_file: aiff_writer = aifc.open(aiff_file, "wb") - try: # note that we can't use context manager, since that was only added in Python 3.4 + # note that we can't use context manager, + # since that was only added in Python 3.4 + try: aiff_writer.setframerate(sample_rate) aiff_writer.setsampwidth(sample_width) aiff_writer.setnchannels(1) @@ -208,13 +224,18 @@ def get_aiff_data(self, convert_rate=None, convert_width=None): def get_flac_data(self, convert_rate=None, convert_width=None): """ - Returns a byte string representing the contents of a FLAC file containing the audio represented by the ``AudioData`` instance. + Returns a byte string representing the contents of a FLAC file containing the + audio represented by the ``AudioData`` instance. - Note that 32-bit FLAC is not supported. If the audio data is 32-bit and ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit FLAC. + Note that 32-bit FLAC is not supported. If the audio data is 32-bit and + ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit + FLAC. - If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. + If ``convert_rate`` is specified and the audio sample rate is not + ``convert_rate`` Hz, the resulting audio is resampled to match. - If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. + If ``convert_width`` is specified and the audio samples are not + ``convert_width`` bytes each, the resulting audio is converted to match. Writing these bytes directly to a file results in a valid `FLAC file `__. 
""" @@ -222,31 +243,34 @@ def get_flac_data(self, convert_rate=None, convert_width=None): convert_width % 1 == 0 and 1 <= convert_width <= 3 ), "Sample width to convert to must be between 1 and 3 inclusive" - if ( - self.sample_width > 3 and convert_width is None - ): # resulting WAV data would be 32-bit, which is not convertable to FLAC using our encoder - convert_width = 3 # the largest supported sample width is 24-bit, so we'll limit the sample width to that + # resulting WAV data would be 32-bit, + # which is not convertable to FLAC using our encoder + if self.sample_width > 3 and convert_width is None: + # the largest supported sample width is 24-bit, + # so we'll limit the sample width to that + convert_width = 3 # run the FLAC converter with the WAV data to get the FLAC data wav_data = self.get_wav_data(convert_rate, convert_width) flac_converter = get_flac_converter() - if ( - os.name == "nt" - ): # on Windows, specify that the process is to be started without showing a console window + + # on Windows, specify that the process is + # to be started without showing a console window + if os.name == "nt": startup_info = subprocess.STARTUPINFO() - startup_info.dwFlags |= ( - subprocess.STARTF_USESHOWWINDOW - ) # specify that the wShowWindow field of `startup_info` contains a value - startup_info.wShowWindow = ( - subprocess.SW_HIDE - ) # specify that the console window should be hidden + # specify that the wShowWindow field of `startup_info` contains a value + startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW + # specify that the console window should be hidden + startup_info.wShowWindow = subprocess.SW_HIDE else: startup_info = None # default startupinfo process = subprocess.Popen( [ flac_converter, "--stdout", - "--totally-silent", # put the resulting FLAC file in stdout, and make sure it's not mixed with any program output + # put the resulting FLAC file in stdout, + # and make sure it's not mixed with any program output + "--totally-silent", "--best", # highest level of compression available "-", # the input FLAC file contents will be given in stdin ], @@ -259,12 +283,13 @@ def get_flac_data(self, convert_rate=None, convert_width=None): def get_flac_converter(): - """Returns the absolute path of a FLAC converter executable, or raises an OSError if none can be found.""" + """Returns the absolute path of a FLAC converter executable, or raises an OSError + if none can be found.""" flac_converter = shutil_which("flac") # check for installed version first if flac_converter is None: # flac utility is not installed - base_path = os.path.dirname( - os.path.abspath(__file__) - ) # directory of the current module file, where all the FLAC bundled binaries are stored + # directory of the current module file, + # where all the FLAC bundled binaries are stored + base_path = os.path.dirname(os.path.abspath(__file__)) system, machine = platform.system(), platform.machine() if system == "Windows" and machine in { "i686", @@ -288,7 +313,9 @@ def get_flac_converter(): flac_converter = os.path.join(base_path, "flac-linux-x86_64") else: # no FLAC converter available raise OSError( - "FLAC conversion utility not available - consider installing the FLAC command line application by running `apt-get install flac` or your operating system's equivalent" + "FLAC conversion utility not available - consider installing the " + "FLAC command line application by running `apt-get install flac` " + "or your operating system's equivalent" ) # mark FLAC converter as executable if possible diff --git 
a/custom_speech_recognition/recognizers/whisper.py b/custom_speech_recognition/recognizers/whisper.py index 98f76ef..0ea5395 100644 --- a/custom_speech_recognition/recognizers/whisper.py +++ b/custom_speech_recognition/recognizers/whisper.py @@ -9,19 +9,22 @@ def recognize_whisper_api( recognizer, - audio_data: "AudioData", + audio_data: AudioData, *, model: str = "whisper-1", api_key: str | None = None, ): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the OpenAI Whisper API. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), + using the OpenAI Whisper API. - This function requires an OpenAI account; visit https://platform.openai.com/signup, then generate API Key in `User settings `__. + This function requires an OpenAI account; visit https://platform.openai.com/signup, + then generate API Key in `User settings `__. Detail: https://platform.openai.com/docs/guides/speech-to-text - Raises a ``speech_recognition.exceptions.SetupError`` exception if there are any issues with the openai installation, or the environment variable is missing. + Raises a ``speech_recognition.exceptions.SetupError`` exception if there are any + issues with the openai installation, or the environment variable is missing. """ if not isinstance(audio_data, AudioData): raise ValueError("``audio_data`` must be an ``AudioData`` instance") diff --git a/main.py b/main.py index f0de8e6..ad399bd 100644 --- a/main.py +++ b/main.py @@ -1,24 +1,34 @@ -import threading -from AudioTranscriber import AudioTranscriber -from GPTResponder import GPTResponder -import customtkinter as ctk -import AudioRecorder import queue -import time -import torch import sys +import threading +import time + +import customtkinter as ctk + +import AudioRecorder import TranscriberModels +from AudioTranscriber import AudioTranscriber +from GPTResponder import GPTResponder + def write_in_textbox(textbox, text): textbox.delete("0.0", "end") textbox.insert("0.0", text) + def update_transcript_UI(transcriber, textbox): transcript_string = transcriber.get_transcript() write_in_textbox(textbox, transcript_string) textbox.after(300, update_transcript_UI, transcriber, textbox) -def update_response_UI(responder, textbox, update_interval_slider_label, update_interval_slider, freeze_state): + +def update_response_UI( + responder, + textbox, + update_interval_slider_label, + update_interval_slider, + freeze_state, +): if not freeze_state[0]: response = responder.response @@ -28,45 +38,78 @@ def update_response_UI(responder, textbox, update_interval_slider_label, update_ update_interval = int(update_interval_slider.get()) responder.update_response_interval(update_interval) - update_interval_slider_label.configure(text=f"Update interval: {update_interval} seconds") + update_interval_slider_label.configure( + text=f"Update interval: {update_interval} seconds" + ) + + textbox.after( + 300, + update_response_UI, + responder, + textbox, + update_interval_slider_label, + update_interval_slider, + freeze_state, + ) - textbox.after(300, update_response_UI, responder, textbox, update_interval_slider_label, update_interval_slider, freeze_state) def clear_context(transcriber, audio_queue): transcriber.clear_transcript_data() with audio_queue.mutex: audio_queue.queue.clear() + def create_ui_components(root): ctk.set_appearance_mode("dark") ctk.set_default_color_theme("dark-blue") root.title("Ecoute") - root.configure(bg='#252422') + root.configure(bg="#252422") root.geometry("1000x600") font_size = 20 - 
transcript_textbox = ctk.CTkTextbox(root, width=300, font=("Arial", font_size), text_color='#FFFCF2', wrap="word") + transcript_textbox = ctk.CTkTextbox( + root, width=300, font=("Arial", font_size), text_color="#FFFCF2", wrap="word" + ) transcript_textbox.grid(row=0, column=0, padx=10, pady=20, sticky="nsew") - response_textbox = ctk.CTkTextbox(root, width=300, font=("Arial", font_size), text_color='#639cdc', wrap="word") + response_textbox = ctk.CTkTextbox( + root, width=300, font=("Arial", font_size), text_color="#639cdc", wrap="word" + ) response_textbox.grid(row=0, column=1, padx=10, pady=20, sticky="nsew") freeze_button = ctk.CTkButton(root, text="Freeze", command=None) freeze_button.grid(row=1, column=1, padx=10, pady=3, sticky="nsew") - update_interval_slider_label = ctk.CTkLabel(root, text=f"", font=("Arial", 12), text_color="#FFFCF2") + update_interval_slider_label = ctk.CTkLabel( + root, text="", font=("Arial", 12), text_color="#FFFCF2" + ) update_interval_slider_label.grid(row=2, column=1, padx=10, pady=3, sticky="nsew") - update_interval_slider = ctk.CTkSlider(root, from_=1, to=10, width=300, height=20, number_of_steps=9) + update_interval_slider = ctk.CTkSlider( + root, from_=1, to=10, width=300, height=20, number_of_steps=9 + ) update_interval_slider.set(2) update_interval_slider.grid(row=3, column=1, padx=10, pady=10, sticky="nsew") - return transcript_textbox, response_textbox, update_interval_slider, update_interval_slider_label, freeze_button + return ( + transcript_textbox, + response_textbox, + update_interval_slider, + update_interval_slider_label, + freeze_button, + ) + def main(): root = ctk.CTk() - transcript_textbox, response_textbox, update_interval_slider, update_interval_slider_label, freeze_button = create_ui_components(root) + ( + transcript_textbox, + response_textbox, + update_interval_slider, + update_interval_slider_label, + freeze_button, + ) = create_ui_components(root) audio_queue = queue.Queue() @@ -78,15 +121,21 @@ def main(): speaker_audio_recorder = AudioRecorder.DefaultSpeakerRecorder() speaker_audio_recorder.record_into_queue(audio_queue) - model = TranscriberModels.get_model('--api' in sys.argv) + model = TranscriberModels.get_model("--api" in sys.argv) - transcriber = AudioTranscriber(user_audio_recorder.source, speaker_audio_recorder.source, model) - transcribe = threading.Thread(target=transcriber.transcribe_audio_queue, args=(audio_queue,)) + transcriber = AudioTranscriber( + user_audio_recorder.source, speaker_audio_recorder.source, model + ) + transcribe = threading.Thread( + target=transcriber.transcribe_audio_queue, args=(audio_queue,) + ) transcribe.daemon = True transcribe.start() responder = GPTResponder() - respond = threading.Thread(target=responder.respond_to_transcriber, args=(transcriber,)) + respond = threading.Thread( + target=responder.respond_to_transcriber, args=(transcriber,) + ) respond.daemon = True respond.start() @@ -99,23 +148,42 @@ def main(): root.grid_columnconfigure(0, weight=2) root.grid_columnconfigure(1, weight=1) - # Add the clear transcript button to the UI - clear_transcript_button = ctk.CTkButton(root, text="Clear Transcript", command=lambda: clear_context(transcriber, audio_queue, )) + # Add the clear transcript button to the UI + clear_transcript_button = ctk.CTkButton( + root, + text="Clear Transcript", + command=lambda: clear_context( + transcriber, + audio_queue, + ), + ) clear_transcript_button.grid(row=1, column=0, padx=10, pady=3, sticky="nsew") - freeze_state = [False] # Using list to be able to 
change its content inside inner functions + freeze_state = [ + False + ] # Using list to be able to change its content inside inner functions + def freeze_unfreeze(): freeze_state[0] = not freeze_state[0] # Invert the freeze state freeze_button.configure(text="Unfreeze" if freeze_state[0] else "Freeze") freeze_button.configure(command=freeze_unfreeze) - update_interval_slider_label.configure(text=f"Update interval: {update_interval_slider.get()} seconds") + update_interval_slider_label.configure( + text=f"Update interval: {update_interval_slider.get()} seconds" + ) update_transcript_UI(transcriber, transcript_textbox) - update_response_UI(responder, response_textbox, update_interval_slider_label, update_interval_slider, freeze_state) - + update_response_UI( + responder, + response_textbox, + update_interval_slider_label, + update_interval_slider, + freeze_state, + ) + root.mainloop() + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/prompts.py b/prompts.py index 5145b91..efcb0d6 100644 --- a/prompts.py +++ b/prompts.py @@ -1,7 +1,13 @@ INITIAL_RESPONSE = "Welcome to Ecoute 👋" + + def create_prompt(transcript): - return f"""You are a casual pal, genuinely interested in the conversation at hand. A poor transcription of conversation is given below. + return f"""You are a casual pal, genuinely interested in the conversation at hand. +A poor transcription of conversation is given below. {transcript}. -Please respond, in detail, to the conversation. Confidently give a straightforward response to the speaker, even if you don't understand them. Give your response in square brackets. DO NOT ask to repeat, and DO NOT ask for clarification. Just answer the speaker directly.""" \ No newline at end of file +Please respond, in detail, to the conversation. Confidently give a straightforward +response to the speaker, even if you don't understand them. Give your response in +square brackets. DO NOT ask to repeat, and DO NOT ask for clarification. 
+Just answer the speaker directly.""" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..c351d70 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,8 @@ +[tool.ruff] +select = [ + # "E", # pycodestyle + "F", # pyflakes + "I", # isort + "UP", # pyupgrade +] +src = ["."] diff --git a/requirements.txt b/requirements.txt index 78f1554..49c0397 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,6 @@ +black +ruff +pip-audit numpy==1.24.3 openai-whisper==20230314 Wave==0.0.2 From fff97658723759e532741f5c7d7df568d10340e5 Mon Sep 17 00:00:00 2001 From: Daniel Zarifpour Date: Thu, 1 Jun 2023 17:06:47 -0400 Subject: [PATCH 2/5] refactor(custom_speech_recognition): revert formatting --- custom_speech_recognition/__init__.py | 2049 +++++------------ custom_speech_recognition/__main__.py | 17 +- custom_speech_recognition/audio.py | 211 +- .../recognizers/whisper.py | 11 +- 4 files changed, 705 insertions(+), 1583 deletions(-) diff --git a/custom_speech_recognition/__init__.py b/custom_speech_recognition/__init__.py index 8af9146..1d339b0 100644 --- a/custom_speech_recognition/__init__.py +++ b/custom_speech_recognition/__init__.py @@ -1,25 +1,24 @@ #!/usr/bin/env python3 -"""Library for performing speech recognition, with support for several engines and APIs, -online and offline.""" +"""Library for performing speech recognition, with support for several engines and APIs, online and offline.""" +import io +import os +import tempfile +import sys +import subprocess +import wave import aifc +import math import audioop -import base64 import collections -import hashlib -import hmac -import io import json -import math -import os -import subprocess -import sys -import tempfile +import base64 import threading +import hashlib +import hmac import time import uuid -import wave try: import requests @@ -30,122 +29,71 @@ __version__ = "3.10.0" __license__ = "BSD" -from urllib.error import HTTPError, URLError from urllib.parse import urlencode from urllib.request import Request, urlopen +from urllib.error import URLError, HTTPError from .audio import AudioData, get_flac_converter from .exceptions import ( RequestError, - TranscriptionFailed, + TranscriptionFailed, TranscriptionNotReady, UnknownValueError, WaitTimeoutError, ) from .recognizers import whisper -AUDIO_DATA_MUST_BE_AUDIO_DATA = "``audio_data`` must be audio data" -LANGUAGE_MUST_BE_STRING = "``language`` must be a string" -KEY_MUST_BE_STRING = "``key`` must be a string" - -NOT_IMPLEMENTED_ERROR_ABSTRACT_CLASS = "this is an abstract class" - -class AudioSource: +class AudioSource(object): def __init__(self): - raise NotImplementedError(NOT_IMPLEMENTED_ERROR_ABSTRACT_CLASS) + raise NotImplementedError("this is an abstract class") def __enter__(self): - raise NotImplementedError(NOT_IMPLEMENTED_ERROR_ABSTRACT_CLASS) + raise NotImplementedError("this is an abstract class") def __exit__(self, exc_type, exc_value, traceback): - raise NotImplementedError(NOT_IMPLEMENTED_ERROR_ABSTRACT_CLASS) + raise NotImplementedError("this is an abstract class") class Microphone(AudioSource): """ - Creates a new ``Microphone`` instance, which represents a physical microphone on the - computer. Subclass of ``AudioSource``. + Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``. - This will throw an ``AttributeError`` if you don't have PyAudio 0.2.11 or later - installed. + This will throw an ``AttributeError`` if you don't have PyAudio 0.2.11 or later installed. 
- If ``device_index`` is unspecified or ``None``, the default microphone is used as - the audio source. Otherwise, ``device_index`` should be the index of the device to - use for audio input. + If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source. Otherwise, ``device_index`` should be the index of the device to use for audio input. - A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` - (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an - audio device such as a microphone or speaker. See the `PyAudio documentation - `__ for more details. + A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an audio device such as a microphone or speaker. See the `PyAudio documentation `__ for more details. - The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of - ``sample_rate`` samples per second (Hertz). If not specified, the value of - ``sample_rate`` is determined automatically from the system's microphone settings. + The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of ``sample_rate`` samples per second (Hertz). If not specified, the value of ``sample_rate`` is determined automatically from the system's microphone settings. - Higher ``sample_rate`` values result in better audio quality, but also more - bandwidth (and therefore, slower recognition). Additionally, some CPUs, such as - those in older Raspberry Pi models, can't keep up if this value is too high. + Higher ``sample_rate`` values result in better audio quality, but also more bandwidth (and therefore, slower recognition). Additionally, some CPUs, such as those in older Raspberry Pi models, can't keep up if this value is too high. - Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient - noise, but also makes detection less sensitive. This value, generally, should be - left at its default. + Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient noise, but also makes detection less sensitive. This value, generally, should be left at its default. 
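
For illustration only (this sketch is not part of the patch itself): a minimal example of the constructor parameters described above, assuming the fork is imported as ``custom_speech_recognition`` as elsewhere in this repo; the 16000 Hz rate and 1024-sample chunk are placeholder values.

    import custom_speech_recognition as sr

    # Device indices correspond to positions in Microphone.list_microphone_names().
    for index, name in enumerate(sr.Microphone.list_microphone_names()):
        print(index, name)

    # Explicit values for the documented parameters; omitting them falls back to
    # the defaults described in the docstring above.
    mic = sr.Microphone(device_index=None, sample_rate=16000, chunk_size=1024)
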
""" - - def __init__( - self, - device_index=None, - sample_rate=None, - chunk_size=1024, - speaker=False, - channels=1, - ): - assert device_index is None or isinstance( - device_index, int - ), "Device index must be None or an integer" - assert sample_rate is None or ( - isinstance(sample_rate, int) and sample_rate > 0 - ), "Sample rate must be None or a positive integer" - assert ( - isinstance(chunk_size, int) and chunk_size > 0 - ), "Chunk size must be a positive integer" + def __init__(self, device_index=None, sample_rate=None, chunk_size=1024, speaker=False, channels = 1): + assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer" + assert sample_rate is None or (isinstance(sample_rate, int) and sample_rate > 0), "Sample rate must be None or a positive integer" + assert isinstance(chunk_size, int) and chunk_size > 0, "Chunk size must be a positive integer" # set up PyAudio - self.speaker = speaker + self.speaker=speaker self.pyaudio_module = self.get_pyaudio() audio = self.pyaudio_module.PyAudio() try: count = audio.get_device_count() # obtain device count - - # ensure device index is in range - if device_index is not None: - assert 0 <= device_index < count, ( - "Device index out of range ({} devices available; " - "device index should be between 0 and {} inclusive)" - ).format(count, count - 1) - - # automatically set the sample rate to the hardware's default sample rate if - # not specified - if sample_rate is None: - device_info = ( - audio.get_device_info_by_index(device_index) - if device_index is not None - else audio.get_default_input_device_info() - ) - assert ( - isinstance(device_info.get("defaultSampleRate"), float | int) - and device_info["defaultSampleRate"] > 0 - ), f"Invalid device info returned from PyAudio: {device_info}" + if device_index is not None: # ensure device index is in range + assert 0 <= device_index < count, "Device index out of range ({} devices available; device index should be between 0 and {} inclusive)".format(count, count - 1) + if sample_rate is None: # automatically set the sample rate to the hardware's default sample rate if not specified + device_info = audio.get_device_info_by_index(device_index) if device_index is not None else audio.get_default_input_device_info() + assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info) sample_rate = int(device_info["defaultSampleRate"]) finally: audio.terminate() self.device_index = device_index self.format = self.pyaudio_module.paInt16 # 16-bit int sampling - - # size of each sample - self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) - + self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) # size of each sample self.SAMPLE_RATE = sample_rate # sampling rate in Hertz self.CHUNK = chunk_size # number of frames stored in each buffer self.channels = channels @@ -156,32 +104,23 @@ def __init__( @staticmethod def get_pyaudio(): """ - Imports the pyaudio module and checks its version. Throws exceptions if pyaudio - can't be found or a wrong version is installed + Imports the pyaudio module and checks its version. 
Throws exceptions if pyaudio can't be found or a wrong version is installed """ try: import pyaudiowpatch as pyaudio except ImportError: raise AttributeError("Could not find PyAudio; check installation") from distutils.version import LooseVersion - if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.11"): - raise AttributeError( - "PyAudio 0.2.11 or later is required (found version {})".format( - pyaudio.__version__ - ) - ) + raise AttributeError("PyAudio 0.2.11 or later is required (found version {})".format(pyaudio.__version__)) return pyaudio @staticmethod def list_microphone_names(): """ - Returns a list of the names of all available microphones. For microphones where - the name can't be retrieved, the list entry contains ``None`` instead. + Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead. - The index of each microphone's name in the returned list is the same as its - device index when creating a ``Microphone`` instance - if you want to use the - microphone at index 3 in the returned list, use ``Microphone(device_index=3)``. + The index of each microphone's name in the returned list is the same as its device index when creating a ``Microphone`` instance - if you want to use the microphone at index 3 in the returned list, use ``Microphone(device_index=3)``. """ audio = Microphone.get_pyaudio().PyAudio() try: @@ -196,15 +135,9 @@ def list_microphone_names(): @staticmethod def list_working_microphones(): """ - Returns a dictionary mapping device indices to microphone names, for microphones - that are currently hearing sounds. When using this function, ensure that your - microphone is unmuted and make some noise at it to ensure it will be detected as - working. - - Each key in the returned dictionary can be passed to the ``Microphone`` - constructor to use that microphone. For example, if the return value is - ``{3: "HDA Intel PCH: ALC3232 Analog (hw:1,0)"}``, you can do - ``Microphone(device_index=3)`` to use that microphone. + Returns a dictionary mapping device indices to microphone names, for microphones that are currently hearing sounds. When using this function, ensure that your microphone is unmuted and make some noise at it to ensure it will be detected as working. + + Each key in the returned dictionary can be passed to the ``Microphone`` constructor to use that microphone. For example, if the return value is ``{3: "HDA Intel PCH: ALC3232 Analog (hw:1,0)"}``, you can do ``Microphone(device_index=3)`` to use that microphone. 
""" pyaudio_module = Microphone.get_pyaudio() audio = pyaudio_module.PyAudio() @@ -213,37 +146,25 @@ def list_working_microphones(): for device_index in range(audio.get_device_count()): device_info = audio.get_device_info_by_index(device_index) device_name = device_info.get("name") - assert ( - isinstance(device_info.get("defaultSampleRate"), float | int) - and device_info["defaultSampleRate"] > 0 - ), f"Invalid device info returned from PyAudio: {device_info}" + assert isinstance(device_info.get("defaultSampleRate"), (float, int)) and device_info["defaultSampleRate"] > 0, "Invalid device info returned from PyAudio: {}".format(device_info) try: # read audio pyaudio_stream = audio.open( - input_device_index=device_index, - channels=1, - format=pyaudio_module.paInt16, - rate=int(device_info["defaultSampleRate"]), - input=True, + input_device_index=device_index, channels=1, format=pyaudio_module.paInt16, + rate=int(device_info["defaultSampleRate"]), input=True ) try: - audio_buffer = pyaudio_stream.read(1024) - if not pyaudio_stream.is_stopped(): - pyaudio_stream.stop_stream() + buffer = pyaudio_stream.read(1024) + if not pyaudio_stream.is_stopped(): pyaudio_stream.stop_stream() finally: pyaudio_stream.close() except Exception: continue # compute RMS of debiased audio - energy = -audioop.rms(audio_buffer, 2) + energy = -audioop.rms(buffer, 2) energy_bytes = bytes([energy & 0xFF, (energy >> 8) & 0xFF]) - debiased_energy = audioop.rms( - audioop.add( - audio_buffer, energy_bytes * (len(audio_buffer) // 2), 2 - ), - 2, - ) + debiased_energy = audioop.rms(audioop.add(buffer, energy_bytes * (len(buffer) // 2), 2), 2) if debiased_energy > 30: # probably actually audio result[device_index] = device_name @@ -252,9 +173,7 @@ def list_working_microphones(): return result def __enter__(self): - assert ( - self.stream is None - ), "This audio source is already inside a context manager" + assert self.stream is None, "This audio source is already inside a context manager" self.audio = self.pyaudio_module.PyAudio() try: @@ -267,18 +186,14 @@ def __enter__(self): format=self.format, rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK, - input=True, + input=True ) ) else: self.stream = Microphone.MicrophoneStream( self.audio.open( - input_device_index=self.device_index, - channels=1, - format=self.format, - rate=self.SAMPLE_RATE, - frames_per_buffer=self.CHUNK, - input=True, + input_device_index=self.device_index, channels=1, format=self.format, + rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK, input=True, ) ) except Exception: @@ -292,7 +207,7 @@ def __exit__(self, exc_type, exc_value, traceback): self.stream = None self.audio.terminate() - class MicrophoneStream: + class MicrophoneStream(object): def __init__(self, pyaudio_stream): self.pyaudio_stream = pyaudio_stream @@ -301,8 +216,7 @@ def read(self, size): def close(self): try: - # sometimes, if the stream isn't stopped, - # closing the stream throws an exception + # sometimes, if the stream isn't stopped, closing the stream throws an exception if not self.pyaudio_stream.is_stopped(): self.pyaudio_stream.stop_stream() finally: @@ -311,33 +225,21 @@ def close(self): class AudioFile(AudioSource): """ - Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file - ``filename_or_fileobject``. Subclass of ``AudioSource``. + Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file ``filename_or_fileobject``. Subclass of ``AudioSource``. 
- If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an - audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a - file-like object such as ``io.BytesIO`` or similar. + If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar. - Note that functions that read from the audio (such as ``recognizer_instance.record`` - or ``recognizer_instance.listen``) will move ahead in the stream. For example, if - you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, - the first time it will return the first 10 seconds of audio, and the second time it - will return the 10 seconds of audio right after that. This is always reset to the - beginning when entering an ``AudioFile`` context. + Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context. - WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are - not supported and may result in undefined behaviour. + WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour. Both AIFF and AIFF-C (compressed AIFF) formats are supported. - FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result - in undefined behaviour. + FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour. 
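
As a hedged illustration of the streaming behaviour described above (again, not part of the patch): consecutive ``record`` calls read successive portions of the file, and entering a fresh ``AudioFile`` context starts from the beginning again. The filename is a placeholder.

    import custom_speech_recognition as sr

    r = sr.Recognizer()
    with sr.AudioFile("example.wav") as source:      # placeholder path
        first_ten = r.record(source, duration=10)    # first 10 seconds
        next_ten = r.record(source, duration=10)     # the 10 seconds after that
    with sr.AudioFile("example.wav") as source:
        from_start = r.record(source, duration=10)   # reset: first 10 seconds again
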
""" def __init__(self, filename_or_fileobject): - assert isinstance(filename_or_fileobject, str) or hasattr( - filename_or_fileobject, "read" - ), "Given audio file must be a filename string or a file-like object" + assert isinstance(filename_or_fileobject, (type(""), type(u""))) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object" self.filename_or_fileobject = filename_or_fileobject self.stream = None self.DURATION = None @@ -349,15 +251,11 @@ def __init__(self, filename_or_fileobject): self.FRAME_COUNT = None def __enter__(self): - assert ( - self.stream is None - ), "This audio source is already inside a context manager" + assert self.stream is None, "This audio source is already inside a context manager" try: # attempt to read the file as WAV self.audio_reader = wave.open(self.filename_or_fileobject, "rb") - self.little_endian = True # RIFF WAV is a little-endian format - # (most ``audioop`` operations assume that the frames are stored in - # little-endian form) + self.little_endian = True # RIFF WAV is a little-endian format (most ``audioop`` operations assume that the frames are stored in little-endian form) except (wave.Error, EOFError): try: # attempt to read the file as AIFF @@ -368,187 +266,102 @@ def __enter__(self): if hasattr(self.filename_or_fileobject, "read"): flac_data = self.filename_or_fileobject.read() else: - with open(self.filename_or_fileobject, "rb") as f: - flac_data = f.read() + with open(self.filename_or_fileobject, "rb") as f: flac_data = f.read() # run the FLAC converter with the FLAC data to get the AIFF data flac_converter = get_flac_converter() - # on Windows, specify that the process is to be started without showing - # a console window - if os.name == "nt": + if os.name == "nt": # on Windows, specify that the process is to be started without showing a console window startup_info = subprocess.STARTUPINFO() - # specify that the wShowWindow field of - # `startup_info` contains a value - startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW - # specify that the console window should be hidden - startup_info.wShowWindow = subprocess.SW_HIDE + startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW # specify that the wShowWindow field of `startup_info` contains a value + startup_info.wShowWindow = subprocess.SW_HIDE # specify that the console window should be hidden else: startup_info = None # default startupinfo - process = subprocess.Popen( - [ - flac_converter, - "--stdout", - # put the resulting AIFF file in stdout, - # and make sure it's not mixed with any program output - "--totally-silent", - "--decode", - "--force-aiff-format", # decode the FLAC file into an AIFF file - "-", # the input FLAC file contents will be given in stdin - ], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - startupinfo=startup_info, - ) + process = subprocess.Popen([ + flac_converter, + "--stdout", "--totally-silent", # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output + "--decode", "--force-aiff-format", # decode the FLAC file into an AIFF file + "-", # the input FLAC file contents will be given in stdin + ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, startupinfo=startup_info) aiff_data, _ = process.communicate(flac_data) aiff_file = io.BytesIO(aiff_data) try: self.audio_reader = aifc.open(aiff_file, "rb") except (aifc.Error, EOFError): - raise ValueError( - "Audio file could not be read as PCM WAV, AIFF/AIFF-C, or " - "Native FLAC; check if file is corrupted or in 
another format" - ) + raise ValueError("Audio file could not be read as PCM WAV, AIFF/AIFF-C, or Native FLAC; check if file is corrupted or in another format") self.little_endian = False # AIFF is a big-endian format - assert ( - 1 <= self.audio_reader.getnchannels() <= 2 - ), "Audio must be mono or stereo" + assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo" self.SAMPLE_WIDTH = self.audio_reader.getsampwidth() - # 24-bit audio needs some special handling for old Python versions (workaround - # for https://bugs.python.org/issue12866) + # 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866) samples_24_bit_pretending_to_be_32_bit = False if self.SAMPLE_WIDTH == 3: # 24-bit audio - try: - # test whether this sample width is supported (for example, ``audioop`` - # in Python 3.3 and below don't support sample width 3, - # while Python 3.4+ do) - audioop.bias(b"", self.SAMPLE_WIDTH, 0) - - # this version of audioop doesn't support - # 24-bit audio (probably Python 3.3 or less) - except audioop.error: - # while the ``AudioFile`` instance will outwardly appear to be 32-bit, - # it will actually internally be 24-bit - samples_24_bit_pretending_to_be_32_bit = True - # the ``AudioFile`` instance should present itself as a 32-bit stream - # now, since we'll be converting into 32-bit on the fly when reading - self.SAMPLE_WIDTH = 4 + try: audioop.bias(b"", self.SAMPLE_WIDTH, 0) # test whether this sample width is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do) + except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less) + samples_24_bit_pretending_to_be_32_bit = True # while the ``AudioFile`` instance will outwardly appear to be 32-bit, it will actually internally be 24-bit + self.SAMPLE_WIDTH = 4 # the ``AudioFile`` instance should present itself as a 32-bit stream now, since we'll be converting into 32-bit on the fly when reading self.SAMPLE_RATE = self.audio_reader.getframerate() self.CHUNK = 4096 self.FRAME_COUNT = self.audio_reader.getnframes() self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE) - self.stream = AudioFile.AudioFileStream( - self.audio_reader, - self.little_endian, - samples_24_bit_pretending_to_be_32_bit, - ) + self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit) return self def __exit__(self, exc_type, exc_value, traceback): - # only close the file if it was opened by this class in the first place - # (if the file was originally given as a path) - if not hasattr(self.filename_or_fileobject, "read"): + if not hasattr(self.filename_or_fileobject, "read"): # only close the file if it was opened by this class in the first place (if the file was originally given as a path) self.audio_reader.close() self.stream = None self.DURATION = None - class AudioFileStream: - def __init__( - self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit - ): - # an audio file object (e.g., a `wave.Wave_read` instance) - self.audio_reader = audio_reader - # whether the audio data is little-endian (when working with big-endian - # things, we'll have to convert it to little-endian before we process it) - self.little_endian = little_endian - # this is true if the audio is 24-bit audio, but 24-bit audio isn't - # supported, so we have to pretend that this is 32-bit audio and convert it - # on the fly - 
self.samples_24_bit_pretending_to_be_32_bit = ( - samples_24_bit_pretending_to_be_32_bit - ) + class AudioFileStream(object): + def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit): + self.audio_reader = audio_reader # an audio file object (e.g., a `wave.Wave_read` instance) + self.little_endian = little_endian # whether the audio data is little-endian (when working with big-endian things, we'll have to convert it to little-endian before we process it) + self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit # this is true if the audio is 24-bit audio, but 24-bit audio isn't supported, so we have to pretend that this is 32-bit audio and convert it on the fly def read(self, size=-1): - audio_buffer = self.audio_reader.readframes( - self.audio_reader.getnframes() if size == -1 else size - ) - if not isinstance(audio_buffer, bytes): - audio_buffer = b"" # workaround for https://bugs.python.org/issue24608 + buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size) + if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608 sample_width = self.audio_reader.getsampwidth() - # big endian format, convert to little endian on the fly - if not self.little_endian: - # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that - # also means that we don't need to worry about 24-bit audio being - # unsupported, since Python 3.4+ always has that functionality) - if hasattr(audioop, "byteswap"): - audio_buffer = audioop.byteswap(audio_buffer, sample_width) - - # manually reverse the bytes of each sample, which is slower but works - # well enough as a fallback - else: - audio_buffer = audio_buffer[sample_width - 1 :: -1] + b"".join( - audio_buffer[i + sample_width : i : -1] - for i in range( - sample_width - 1, len(audio_buffer), sample_width - ) - ) + if not self.little_endian: # big endian format, convert to little endian on the fly + if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality) + buffer = audioop.byteswap(buffer, sample_width) + else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback + buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width)) # workaround for https://bugs.python.org/issue12866 - # we need to convert samples from 24-bit to 32-bit before we can process - # them with ``audioop`` functions - if self.samples_24_bit_pretending_to_be_32_bit: - # since we're in little endian, we prepend a zero byte to each 24-bit - # sample to get a 32-bit sample - audio_buffer = b"".join( - b"\x00" + audio_buffer[i : i + sample_width] - for i in range(0, len(audio_buffer), sample_width) - ) - # make sure we thread the buffer as 32-bit audio now, after converting - # it from 24-bit audio - sample_width = 4 + if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions + buffer = b"".join(b"\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample + sample_width = 4 # make sure we thread the buffer as 32-bit audio now, after converting it 
from 24-bit audio if self.audio_reader.getnchannels() != 1: # stereo audio - # convert stereo audio data to mono - audio_buffer = audioop.tomono(audio_buffer, sample_width, 1, 1) - return audio_buffer + buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono + return buffer class Recognizer(AudioSource): def __init__(self): """ - Creates a new ``Recognizer`` instance, which represents a collection of speech - recognition functionality. + Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality. """ self.energy_threshold = 300 # minimum audio energy to consider for recording self.dynamic_energy_threshold = True self.dynamic_energy_adjustment_damping = 0.15 self.dynamic_energy_ratio = 1.5 - # seconds of non-speaking audio before a phrase is considered complete - self.pause_threshold = 0.8 - # seconds after an internal operation (e.g., an API request) starts before it - # times out, or ``None`` for no timeout - self.operation_timeout = None - # minimum seconds of speaking audio before we consider the speaking audio a - # phrase - values below this are ignored (for filtering out clicks and pops) - self.phrase_threshold = 0.3 - # seconds of non-speaking audio to keep on both sides of the recording - self.non_speaking_duration = 0.5 + self.pause_threshold = 0.8 # seconds of non-speaking audio before a phrase is considered complete + self.operation_timeout = None # seconds after an internal operation (e.g., an API request) starts before it times out, or ``None`` for no timeout + + self.phrase_threshold = 0.3 # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops) + self.non_speaking_duration = 0.5 # seconds of non-speaking audio to keep on both sides of the recording def record(self, source, duration=None, offset=None): """ - Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` - instance) starting at ``offset`` (or at the beginning if not specified) into an - ``AudioData`` instance, which it returns. + Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns. - If ``duration`` is not specified, then it will record until there is no more - audio input. + If ``duration`` is not specified, then it will record until there is no more audio input. """ assert isinstance(source, AudioSource), "Source must be an audio source" - assert source.stream is not None, ( - "Audio source must be entered before recording, see documentation for " - "``AudioSource``; are you using ``source`` outside of a ``with`` statement?" - ) + assert source.stream is not None, "Audio source must be entered before recording, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" 
frames = io.BytesIO() seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE @@ -561,16 +374,14 @@ def record(self, source, duration=None, offset=None): if offset_time > offset: offset_reached = True - audio_buffer = source.stream.read(source.CHUNK) - if len(audio_buffer) == 0: - break + buffer = source.stream.read(source.CHUNK) + if len(buffer) == 0: break if offset_reached or not offset: elapsed_time += seconds_per_buffer - if duration and elapsed_time > duration: - break + if duration and elapsed_time > duration: break - frames.write(audio_buffer) + frames.write(buffer) frame_data = frames.getvalue() frames.close() @@ -578,22 +389,14 @@ def record(self, source, duration=None, offset=None): def adjust_for_ambient_noise(self, source, duration=1): """ - Adjusts the energy threshold dynamically using audio from ``source`` (an - ``AudioSource`` instance) to account for ambient noise. + Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise. - Intended to calibrate the energy threshold with the ambient energy level. - Should be used on periods of audio without speech - will stop early if any - speech is detected. + Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected. - The ``duration`` parameter is the maximum number of seconds that it will - dynamically adjust the threshold for before returning. This value should be at - least 0.5 in order to get a representative sample of the ambient noise. + The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise. """ assert isinstance(source, AudioSource), "Source must be an audio source" - assert source.stream is not None, ( - "Audio source must be entered before adjusting, see documentation for " - "``AudioSource``; are you using ``source`` outside of a ``with`` statement?" - ) + assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" 
assert self.pause_threshold >= self.non_speaking_duration >= 0 seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE @@ -602,40 +405,27 @@ def adjust_for_ambient_noise(self, source, duration=1): # adjust energy threshold until a phrase starts while True: elapsed_time += seconds_per_buffer - if elapsed_time > duration: - break - audio_buffer = source.stream.read(source.CHUNK) - - # energy of the audio signal - energy = audioop.rms(audio_buffer, source.SAMPLE_WIDTH) + if elapsed_time > duration: break + buffer = source.stream.read(source.CHUNK) + energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal # dynamically adjust the energy threshold using asymmetric weighted average - # account for different chunk sizes and rates - damping = self.dynamic_energy_adjustment_damping**seconds_per_buffer + damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates target_energy = energy * self.dynamic_energy_ratio - self.energy_threshold = self.energy_threshold * damping + target_energy * ( - 1 - damping - ) + self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping) - def snowboy_wait_for_hot_word( - self, snowboy_location, snowboy_hot_word_files, source, timeout=None - ): + def snowboy_wait_for_hot_word(self, snowboy_location, snowboy_hot_word_files, source, timeout=None): # load snowboy library (NOT THREAD SAFE) sys.path.append(snowboy_location) import snowboydetect - sys.path.pop() detector = snowboydetect.SnowboyDetect( - resource_filename=os.path.join( - snowboy_location, "resources", "common.res" - ).encode(), - model_str=",".join(snowboy_hot_word_files).encode(), + resource_filename=os.path.join(snowboy_location, "resources", "common.res").encode(), + model_str=",".join(snowboy_hot_word_files).encode() ) detector.SetAudioGain(1.0) - detector.SetSensitivity( - ",".join(["0.4"] * len(snowboy_hot_word_files)).encode() - ) + detector.SetSensitivity(",".join(["0.4"] * len(snowboy_hot_word_files)).encode()) snowboy_sample_rate = detector.SampleRate() elapsed_time = 0 @@ -644,128 +434,65 @@ def snowboy_wait_for_hot_word( # buffers capable of holding 5 seconds of original audio five_seconds_buffer_count = int(math.ceil(5 / seconds_per_buffer)) - # buffers capable of holding 0.5 seconds of resampled audio half_second_buffer_count = int(math.ceil(0.5 / seconds_per_buffer)) - frames = collections.deque(maxlen=five_seconds_buffer_count) resampled_frames = collections.deque(maxlen=half_second_buffer_count) - # snowboy check interval check_interval = 0.05 last_check = time.time() while True: elapsed_time += seconds_per_buffer if timeout and elapsed_time > timeout: - raise WaitTimeoutError( - "listening timed out while waiting for hotword to be said" - ) + raise WaitTimeoutError("listening timed out while waiting for hotword to be said") - audio_buffer = source.stream.read(source.CHUNK) - if len(audio_buffer) == 0: - break # reached end of the stream - frames.append(audio_buffer) + buffer = source.stream.read(source.CHUNK) + if len(buffer) == 0: break # reached end of the stream + frames.append(buffer) # resample audio to the required sample rate - resampled_buffer, resampling_state = audioop.ratecv( - audio_buffer, - source.SAMPLE_WIDTH, - 1, - source.SAMPLE_RATE, - snowboy_sample_rate, - resampling_state, - ) + resampled_buffer, resampling_state = audioop.ratecv(buffer, source.SAMPLE_WIDTH, 1, source.SAMPLE_RATE, snowboy_sample_rate, resampling_state) 
resampled_frames.append(resampled_buffer) if time.time() - last_check > check_interval: # run Snowboy on the resampled audio snowboy_result = detector.RunDetection(b"".join(resampled_frames)) - assert ( - snowboy_result != -1 - ), "Error initializing streams or reading audio data" - if snowboy_result > 0: - break # wake word found + assert snowboy_result != -1, "Error initializing streams or reading audio data" + if snowboy_result > 0: break # wake word found resampled_frames.clear() last_check = time.time() return b"".join(frames), elapsed_time - def listen( - self, source, timeout=None, phrase_time_limit=None, snowboy_configuration=None - ): + def listen(self, source, timeout=None, phrase_time_limit=None, snowboy_configuration=None): """ - Records a single phrase from ``source`` (an ``AudioSource`` instance) into an - ``AudioData`` instance, which it returns. - - This is done by waiting until the audio has an energy above - ``recognizer_instance.energy_threshold`` (the user has started speaking), and - then recording until it encounters ``recognizer_instance.pause_threshold`` - seconds of non-speaking or there is no more audio input. The ending silence is - not included. - - The ``timeout`` parameter is the maximum number of seconds that this will wait - for a phrase to start before giving up and throwing an - ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, - there will be no wait timeout. - - The ``phrase_time_limit`` parameter is the maximum number of seconds that this - will allow a phrase to continue before stopping and returning the part of the - phrase processed before the time limit was reached. The resulting audio will be - the phrase cut off at the time limit. If ``phrase_timeout`` is ``None``, there - will be no phrase time limit. - - The ``snowboy_configuration`` parameter allows integration with - `Snowboy `__, an offline, high-accuracy, - power-efficient hotword recognition engine. When used, this function will pause - until Snowboy detects a hotword, after which it will unpause. This parameter - should either be ``None`` to turn off Snowboy support, or a tuple of the form - ``(SNOWBOY_LOCATION, LIST_OF_HOT_WORD_FILES)``, where ``SNOWBOY_LOCATION`` is - the path to the Snowboy root directory, and ``LIST_OF_HOT_WORD_FILES`` is a list - of paths to Snowboy hotword configuration files (`*.pmdl` or `*.umdl` format). - - This operation will always complete within ``timeout + phrase_timeout`` seconds - if both are numbers, either by returning the audio data, or by raising a - ``speech_recognition.WaitTimeoutError`` exception. + Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns. + + This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included. + + The ``timeout`` parameter is the maximum number of seconds that this will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, there will be no wait timeout. + + The ``phrase_time_limit`` parameter is the maximum number of seconds that this will allow a phrase to continue before stopping and returning the part of the phrase processed before the time limit was reached. 
The resulting audio will be the phrase cut off at the time limit. If ``phrase_timeout`` is ``None``, there will be no phrase time limit. + + The ``snowboy_configuration`` parameter allows integration with `Snowboy `__, an offline, high-accuracy, power-efficient hotword recognition engine. When used, this function will pause until Snowboy detects a hotword, after which it will unpause. This parameter should either be ``None`` to turn off Snowboy support, or a tuple of the form ``(SNOWBOY_LOCATION, LIST_OF_HOT_WORD_FILES)``, where ``SNOWBOY_LOCATION`` is the path to the Snowboy root directory, and ``LIST_OF_HOT_WORD_FILES`` is a list of paths to Snowboy hotword configuration files (`*.pmdl` or `*.umdl` format). + + This operation will always complete within ``timeout + phrase_timeout`` seconds if both are numbers, either by returning the audio data, or by raising a ``speech_recognition.WaitTimeoutError`` exception. """ assert isinstance(source, AudioSource), "Source must be an audio source" - assert source.stream is not None, ( - "Audio source must be entered before listening, see documentation for " - "``AudioSource``; are you using ``source`` outside of a ``with`` statement?" - ) + assert source.stream is not None, "Audio source must be entered before listening, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?" assert self.pause_threshold >= self.non_speaking_duration >= 0 if snowboy_configuration is not None: - assert os.path.isfile( - os.path.join(snowboy_configuration[0], "snowboydetect.py") - ), ( - "``snowboy_configuration[0]`` must be a Snowboy root directory " - "containing ``snowboydetect.py``" - ) + assert os.path.isfile(os.path.join(snowboy_configuration[0], "snowboydetect.py")), "``snowboy_configuration[0]`` must be a Snowboy root directory containing ``snowboydetect.py``" for hot_word_file in snowboy_configuration[1]: - assert os.path.isfile(hot_word_file), ( - "``snowboy_configuration[1]`` must be a list of Snowboy hot word " - "configuration files" - ) + assert os.path.isfile(hot_word_file), "``snowboy_configuration[1]`` must be a list of Snowboy hot word configuration files" seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE - - # number of buffers of non-speaking audio during a phrase, before the phrase - # should be considered complete - pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) - - # minimum number of buffers of speaking audio - # before we consider the speaking audio a phrase - phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) - - # maximum number of buffers of non-speaking audio - # to retain before and after a phrase - non_speaking_buffer_count = int( - math.ceil(self.non_speaking_duration / seconds_per_buffer) - ) + pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio during a phrase, before the phrase should be considered complete + phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase + non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after a phrase # read audio input for phrases until there is a phrase that is long enough elapsed_time = 0 # number of seconds of audio read - # an empty buffer means that the stream has ended - # and there is no 
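
A hedged sketch (not from the patch) of the ``timeout`` / ``phrase_time_limit`` behaviour documented above; the concrete values are placeholders.

    import custom_speech_recognition as sr

    r = sr.Recognizer()
    with sr.Microphone(sample_rate=16000) as source:
        try:
            # Wait at most 5 s for speech to start; cut the phrase off after 10 s.
            audio = r.listen(source, timeout=5, phrase_time_limit=10)
        except sr.WaitTimeoutError:
            audio = None  # no phrase started within the timeout
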
data left to read - audio_buffer = b"" + buffer = b"" # an empty buffer means that the stream has ended and there is no data left to read while True: frames = collections.deque() @@ -775,48 +502,30 @@ def listen( # handle waiting too long for phrase by raising an exception elapsed_time += seconds_per_buffer if timeout and elapsed_time > timeout: - raise WaitTimeoutError( - "listening timed out while waiting for phrase to start" - ) - - audio_buffer = source.stream.read(source.CHUNK) - if len(audio_buffer) == 0: - break # reached end of the stream - frames.append(audio_buffer) + raise WaitTimeoutError("listening timed out while waiting for phrase to start") - # ensure we only keep the needed amount of non-speaking buffers - if len(frames) > non_speaking_buffer_count: + buffer = source.stream.read(source.CHUNK) + if len(buffer) == 0: break # reached end of the stream + frames.append(buffer) + if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers frames.popleft() # detect whether speaking has started on audio input + energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal + if energy > self.energy_threshold: break - # energy of the audio signal - energy = audioop.rms(audio_buffer, source.SAMPLE_WIDTH) - if energy > self.energy_threshold: - break - - # dynamically adjust the energy threshold using asymmetric - # weighted average + # dynamically adjust the energy threshold using asymmetric weighted average if self.dynamic_energy_threshold: - # account for different chunk sizes and rates - damping = ( - self.dynamic_energy_adjustment_damping**seconds_per_buffer - ) + damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates target_energy = energy * self.dynamic_energy_ratio - self.energy_threshold = ( - self.energy_threshold * damping - + target_energy * (1 - damping) - ) + self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping) else: # read audio input until the hotword is said snowboy_location, snowboy_hot_word_files = snowboy_configuration - audio_buffer, delta_time = self.snowboy_wait_for_hot_word( - snowboy_location, snowboy_hot_word_files, source, timeout - ) + buffer, delta_time = self.snowboy_wait_for_hot_word(snowboy_location, snowboy_hot_word_files, source, timeout) elapsed_time += delta_time - if len(audio_buffer) == 0: - break # reached end of the stream - frames.append(audio_buffer) + if len(buffer) == 0: break # reached end of the stream + frames.append(buffer) # read audio input until the phrase ends pause_count, phrase_count = 0, 0 @@ -824,23 +533,16 @@ def listen( while True: # handle phrase being too long by cutting off the audio elapsed_time += seconds_per_buffer - if ( - phrase_time_limit - and elapsed_time - phrase_start_time > phrase_time_limit - ): + if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit: break - audio_buffer = source.stream.read(source.CHUNK) - if len(audio_buffer) == 0: - break # reached end of the stream - frames.append(audio_buffer) + buffer = source.stream.read(source.CHUNK) + if len(buffer) == 0: break # reached end of the stream + frames.append(buffer) phrase_count += 1 - # check if speaking has stopped for longer than the pause threshold - # on the audio input - - # unit energy of the audio signal within the buffer - energy = audioop.rms(audio_buffer, source.SAMPLE_WIDTH) + # check if speaking has stopped for longer than the pause threshold on the audio input + 
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # unit energy of the audio signal within the buffer if energy > self.energy_threshold: pause_count = 0 else: @@ -848,49 +550,25 @@ def listen( if pause_count > pause_buffer_count: # end of the phrase break - # check how long the detected phrase is, - # and retry listening if the phrase is too short - - # exclude the buffers for the pause before the phrase - phrase_count -= pause_count - - if phrase_count >= phrase_buffer_count or len(audio_buffer) == 0: - # phrase is long enough or we've reached the end of the stream, - # so stop listening - break + # check how long the detected phrase is, and retry listening if the phrase is too short + phrase_count -= pause_count # exclude the buffers for the pause before the phrase + if phrase_count >= phrase_buffer_count or len(buffer) == 0: break # phrase is long enough or we've reached the end of the stream, so stop listening # obtain frame data - for _ in range(pause_count - non_speaking_buffer_count): - frames.pop() # remove extra non-speaking frames at the end + for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end frame_data = b"".join(frames) return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH) def listen_in_background(self, source, callback, phrase_time_limit=None): """ - Spawns a thread to repeatedly record phrases from ``source`` (an - ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` - with that ``AudioData`` instance as soon as each phrase are detected. - - Returns a function object that, when called, requests that the background - listener thread stop. The background thread is a daemon and will not stop the - program from exiting if there are no other non-daemon threads. The function - accepts one parameter, ``wait_for_stop``: if truthy, the function will wait for - the background listener to stop before returning, otherwise it will return - immediately and the background listener thread might still be running for a - second or two afterwards. Additionally, if you are using a truthy value for - ``wait_for_stop``, you must call the function from the same thread you - originally called ``listen_in_background`` from. - - Phrase recognition uses the exact same mechanism as - ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter - works in the same way as the ``phrase_time_limit`` parameter for - ``recognizer_instance.listen(source)``, as well. - - The ``callback`` parameter is a function that should accept two parameters - - the ``recognizer_instance``, and an ``AudioData`` instance representing the - captured audio. Note that ``callback`` function will be called from a non-main - thread. + Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase are detected. + + Returns a function object that, when called, requests that the background listener thread stop. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads. The function accepts one parameter, ``wait_for_stop``: if truthy, the function will wait for the background listener to stop before returning, otherwise it will return immediately and the background listener thread might still be running for a second or two afterwards. 
Additionally, if you are using a truthy value for ``wait_for_stop``, you must call the function from the same thread you originally called ``listen_in_background`` from. + + Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter works in the same way as the ``phrase_time_limit`` parameter for ``recognizer_instance.listen(source)``, as well. + + The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that ``callback`` function will be called from a non-main thread. """ assert isinstance(source, AudioSource), "Source must be an audio source" running = [True] @@ -898,199 +576,95 @@ def listen_in_background(self, source, callback, phrase_time_limit=None): def threaded_listen(): with source as s: while running[0]: - # listen for 1 second, then check again - # if the stop function has been called - try: + try: # listen for 1 second, then check again if the stop function has been called audio = self.listen(s, 1, phrase_time_limit) - - # listening timed out, just try again - except WaitTimeoutError: + except WaitTimeoutError: # listening timed out, just try again pass else: - if running[0]: - callback(self, audio) + if running[0]: callback(self, audio) def stopper(wait_for_stop=True): running[0] = False if wait_for_stop: - # block until the background thread is done, - # which can take around 1 second - listener_thread.join() + listener_thread.join() # block until the background thread is done, which can take around 1 second listener_thread = threading.Thread(target=threaded_listen) listener_thread.daemon = True listener_thread.start() return stopper - def recognize_sphinx( - self, - audio_data, - language="en-US", - keyword_entries=None, - grammar=None, - show_all=False, - ): + def recognize_sphinx(self, audio_data, language="en-US", keyword_entries=None, grammar=None, show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using CMU Sphinx. - - The recognition language is determined by ``language``, an RFC5646 language tag - like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only - ``en-US`` is supported. See `Notes on using `PocketSphinx - `__ - for information about installing other languages. This document is also - included under ``reference/pocketsphinx.rst``. The ``language`` parameter can - also be a tuple of filesystem paths, of the form - ``(acoustic_parameters_directory, language_model_file, - phoneme_dictionary_file)`` - this allows you to load arbitrary Sphinx models. - - If specified, the keywords to search for are determined by ``keyword_entries``, - an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` - is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer - should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very - sensitive, more false positives) inclusive. If not specified or ``None``, no - keywords are used and Sphinx will simply transcribe whatever words it - recognizes. Specifying ``keyword_entries`` is more accurate than just looking - for those same keywords in non-keyword-based transcriptions, because Sphinx - knows specifically what sounds to look for. - - Sphinx can also handle FSG or JSGF grammars. The parameter ``grammar`` expects - a path to the grammar file. 
Note that if a JSGF grammar is passed, an FSG - grammar will be created at the same location to speed up execution in the next - run. If ``keyword_entries`` are passed, content of ``grammar`` will be ignored. - - Returns the most likely transcription if ``show_all`` is false (the default). - Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object - resulting from the recognition. - - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is - unintelligible. Raises a ``speech_recognition.RequestError`` exception if there - are any issues with the Sphinx installation. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx. + + The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using `PocketSphinx `__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``. The ``language`` parameter can also be a tuple of filesystem paths, of the form ``(acoustic_parameters_directory, language_model_file, phoneme_dictionary_file)`` - this allows you to load arbitrary Sphinx models. + + If specified, the keywords to search for are determined by ``keyword_entries``, an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very sensitive, more false positives) inclusive. If not specified or ``None``, no keywords are used and Sphinx will simply transcribe whatever words it recognizes. Specifying ``keyword_entries`` is more accurate than just looking for those same keywords in non-keyword-based transcriptions, because Sphinx knows specifically what sounds to look for. + + Sphinx can also handle FSG or JSGF grammars. The parameter ``grammar`` expects a path to the grammar file. Note that if a JSGF grammar is passed, an FSG grammar will be created at the same location to speed up execution in the next run. If ``keyword_entries`` are passed, content of ``grammar`` will be ignored. + + Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation. 
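
A hedged sketch (not from the patch) of the keyword-spotting call described above; the audio path, keywords, and sensitivities are placeholders, and PocketSphinx must be installed for the call to succeed.

    import custom_speech_recognition as sr

    r = sr.Recognizer()
    with sr.AudioFile("command.wav") as source:  # placeholder path
        audio = r.record(source)
    try:
        text = r.recognize_sphinx(
            audio,
            language="en-US",
            keyword_entries=[("forward", 0.8), ("stop", 0.6)],  # placeholder keywords
        )
    except sr.UnknownValueError:
        text = ""  # speech was unintelligible
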
""" - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA - assert isinstance(language, str) or ( - isinstance(language, tuple) and len(language) == 3 - ), ( - "``language`` must be a string or 3-tuple of Sphinx data file paths of the " - "form ``(acoustic_parameters, language_model, phoneme_dictionary)``" - ) - assert keyword_entries is None or all( - isinstance(keyword, str) and 0 <= sensitivity <= 1 - for keyword, sensitivity in keyword_entries - ), ( - "``keyword_entries`` must be ``None`` or a list of pairs of strings and " - "numbers between 0 and 1" - ) + assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" + assert isinstance(language, str) or (isinstance(language, tuple) and len(language) == 3), "``language`` must be a string or 3-tuple of Sphinx data file paths of the form ``(acoustic_parameters, language_model, phoneme_dictionary)``" + assert keyword_entries is None or all(isinstance(keyword, (type(""), type(u""))) and 0 <= sensitivity <= 1 for keyword, sensitivity in keyword_entries), "``keyword_entries`` must be ``None`` or a list of pairs of strings and numbers between 0 and 1" # import the PocketSphinx speech recognition module try: - from pocketsphinx import FsgModel, Jsgf, pocketsphinx + from pocketsphinx import pocketsphinx, Jsgf, FsgModel except ImportError: - raise RequestError( - "missing PocketSphinx module: ensure that PocketSphinx is set " - "up correctly." - ) + raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.") except ValueError: - raise RequestError( - "bad PocketSphinx installation; try reinstalling PocketSphinx " - "version 0.0.9 or better." - ) - if not hasattr(pocketsphinx, "Decoder") or not hasattr( - pocketsphinx.Decoder, "default_config" - ): - raise RequestError( - "outdated PocketSphinx installation; ensure you have PocketSphinx " - "version 0.0.9 or better." 
- ) + raise RequestError("bad PocketSphinx installation; try reinstalling PocketSphinx version 0.0.9 or better.") + if not hasattr(pocketsphinx, "Decoder") or not hasattr(pocketsphinx.Decoder, "default_config"): + raise RequestError("outdated PocketSphinx installation; ensure you have PocketSphinx version 0.0.9 or better.") if isinstance(language, str): # directory containing language data - language_directory = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - "pocketsphinx-data", - language, - ) + language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language) if not os.path.isdir(language_directory): - raise RequestError( - 'missing PocketSphinx language data directory: "{}"'.format( - language_directory - ) - ) - acoustic_parameters_directory = os.path.join( - language_directory, "acoustic-model" - ) - language_model_file = os.path.join( - language_directory, "language-model.lm.bin" - ) - phoneme_dictionary_file = os.path.join( - language_directory, "pronounciation-dictionary.dict" - ) + raise RequestError("missing PocketSphinx language data directory: \"{}\"".format(language_directory)) + acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model") + language_model_file = os.path.join(language_directory, "language-model.lm.bin") + phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict") else: # 3-tuple of Sphinx data file paths - ( - acoustic_parameters_directory, - language_model_file, - phoneme_dictionary_file, - ) = language + acoustic_parameters_directory, language_model_file, phoneme_dictionary_file = language if not os.path.isdir(acoustic_parameters_directory): - raise RequestError( - 'missing PocketSphinx language model parameters directory: "{}"'.format( - acoustic_parameters_directory - ) - ) + raise RequestError("missing PocketSphinx language model parameters directory: \"{}\"".format(acoustic_parameters_directory)) if not os.path.isfile(language_model_file): - raise RequestError( - 'missing PocketSphinx language model file: "{}"'.format( - language_model_file - ) - ) + raise RequestError("missing PocketSphinx language model file: \"{}\"".format(language_model_file)) if not os.path.isfile(phoneme_dictionary_file): - raise RequestError( - 'missing PocketSphinx phoneme dictionary file: "{}"'.format( - phoneme_dictionary_file - ) - ) + raise RequestError("missing PocketSphinx phoneme dictionary file: \"{}\"".format(phoneme_dictionary_file)) # create decoder object config = pocketsphinx.Decoder.default_config() - - # set the path of the hidden Markov model (HMM) parameter files - config.set_string("-hmm", acoustic_parameters_directory) - + config.set_string("-hmm", acoustic_parameters_directory) # set the path of the hidden Markov model (HMM) parameter files config.set_string("-lm", language_model_file) config.set_string("-dict", phoneme_dictionary_file) - - # disable logging (logging causes unwanted output in terminal) - config.set_string("-logfn", os.devnull) - + config.set_string("-logfn", os.devnull) # disable logging (logging causes unwanted output in terminal) decoder = pocketsphinx.Decoder(config) # obtain audio data - - # the included language models require audio to be - # 16-bit mono 16 kHz in little-endian format - raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) + raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) # the included language models require audio to be 16-bit mono 16 kHz in little-endian format # 
obtain recognition results if keyword_entries is not None: # explicitly specified set of keywords with PortableNamedTemporaryFile("w") as f: - # generate a keywords file - Sphinx documentation recommendeds - # sensitivities between 1e-50 and 1e-5 - f.writelines( - "{} /1e{}/\n".format(keyword, 100 * sensitivity - 110) - for keyword, sensitivity in keyword_entries - ) + # generate a keywords file - Sphinx documentation recommendeds sensitivities between 1e-50 and 1e-5 + f.writelines("{} /1e{}/\n".format(keyword, 100 * sensitivity - 110) for keyword, sensitivity in keyword_entries) f.flush() - # perform the speech recognition with the keywords file (this is inside - # the context manager so the file isn;t deleted until we're done) + # perform the speech recognition with the keywords file (this is inside the context manager so the file isn;t deleted until we're done) decoder.set_kws("keywords", f.name) decoder.set_search("keywords") elif grammar is not None: # a path to a FSG or JSGF grammar if not os.path.exists(grammar): - raise ValueError(f"Grammar '{grammar}' does not exist.") + raise ValueError("Grammar '{0}' does not exist.".format(grammar)) grammar_path = os.path.abspath(os.path.dirname(grammar)) grammar_name = os.path.splitext(os.path.basename(grammar))[0] - fsg_path = f"{grammar_path}/{grammar_name}.fsg" + fsg_path = "{0}/{1}.fsg".format(grammar_path, grammar_name) if not os.path.exists(fsg_path): # create FSG grammar if not available jsgf = Jsgf(grammar) rule = jsgf.get_rule("{0}.{0}".format(grammar_name)) @@ -1102,195 +676,113 @@ def recognize_sphinx( decoder.set_search(grammar_name) decoder.start_utt() # begin utterance processing - - # process audio data with recognition enabled (no_search = False), as a full - # utterance (full_utt = True) - decoder.process_raw(raw_data, False, True) - + decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True) decoder.end_utt() # stop utterance processing - if show_all: - return decoder + if show_all: return decoder + # return results hypothesis = decoder.hyp() - if hypothesis is not None: - return hypothesis.hypstr + if hypothesis is not None: return hypothesis.hypstr raise UnknownValueError() # no transcriptions available - def recognize_google( - self, - audio_data, - key=None, - language="en-US", - pfilter=0, - show_all=False, - with_confidence=False, - ): + def recognize_google(self, audio_data, key=None, language="en-US", pfilter=0, show_all=False, with_confidence=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using the Google Speech Recognition API. - - The Google Speech Recognition API key is specified by ``key``. If not specified, - it uses a generic key that works out of the box. This should generally be used - for personal or testing purposes only, as it **may be revoked by Google at any - time**. - - To obtain your own API key, simply following the steps on the `API Keys - `__ page at the Chromium - Developers site. In the Google Developers Console, Google Speech Recognition is - listed as "Speech API". - - The recognition language is determined by ``language``, an RFC5646 language tag - like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting - to US English. A list of supported language tags can be found in this - `StackOverflow answer `__. 
- - The profanity filter level can be adjusted with ``pfilter``: 0 - No filter, 1 - - Only shows the first character and replaces the rest with asterisks. - The default is level 0. - - Returns the most likely transcription if ``show_all`` is false (the default). - Otherwise, returns the raw API response as a JSON dictionary. - - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is - unintelligible. Raises a ``speech_recognition.RequestError`` exception if the - speech recognition operation failed, if the key isn't valid, or if there is no - internet connection. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API. + + The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**. + + To obtain your own API key, simply following the steps on the `API Keys `__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API". + + The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language tags can be found in this `StackOverflow answer `__. + + The profanity filter level can be adjusted with ``pfilter``: 0 - No filter, 1 - Only shows the first character and replaces the rest with asterisks. The default is level 0. + + Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 
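Illustrative usage of the recognize_google path documented above (not part of the diff); it relies on the built-in test key mentioned in the docstring, so nothing beyond an internet connection and a local "example.wav" is assumed.

import custom_speech_recognition as sr

r = sr.Recognizer()
with sr.AudioFile("example.wav") as source:  # hypothetical sample file
    audio = r.record(source)

try:
    # pfilter=1 masks profanity after its first character, per the docstring above
    print(r.recognize_google(audio, language="en-US", pfilter=1))
except sr.UnknownValueError:
    print("Google Speech Recognition could not understand the audio")
except sr.RequestError as e:
    print(f"Could not request results from Google Speech Recognition; {e}")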
""" - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA - assert key is None or isinstance( - key, str - ), "``key`` must be ``None`` or a string" - assert isinstance(language, str), LANGUAGE_MUST_BE_STRING + assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" + assert key is None or isinstance(key, str), "``key`` must be ``None`` or a string" + assert isinstance(language, str), "``language`` must be a string" flac_data = audio_data.get_flac_data( - convert_rate=None - if audio_data.sample_rate >= 8000 - else 8000, # audio samples must be at least 8 kHz - convert_width=2, # audio samples must be 16-bit - ) - if key is None: - key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw" - url = "http://www.google.com/speech-api/v2/recognize?{}".format( - urlencode( - {"client": "chromium", "lang": language, "key": key, "pFilter": pfilter} - ) - ) - request = Request( - url, - data=flac_data, - headers={"Content-Type": f"audio/x-flac; rate={audio_data.sample_rate}"}, + convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz + convert_width=2 # audio samples must be 16-bit ) + if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw" + url = "http://www.google.com/speech-api/v2/recognize?{}".format(urlencode({ + "client": "chromium", + "lang": language, + "key": key, + "pFilter": pfilter + })) + request = Request(url, data=flac_data, headers={"Content-Type": "audio/x-flac; rate={}".format(audio_data.sample_rate)}) # obtain audio transcription results try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError(f"recognition request failed: {e.reason}") + raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: - raise RequestError(f"recognition connection failed: {e.reason}") + raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") # ignore any blank blocks actual_result = [] for line in response_text.split("\n"): - if not line: - continue + if not line: continue result = json.loads(line)["result"] if len(result) != 0: actual_result = result[0] break + # return results if show_all: return actual_result - if ( - not isinstance(actual_result, dict) - or len(actual_result.get("alternative", [])) == 0 - ): - raise UnknownValueError() + if not isinstance(actual_result, dict) or len(actual_result.get("alternative", [])) == 0: raise UnknownValueError() if "confidence" in actual_result["alternative"]: # return alternative with highest confidence score - best_hypothesis = max( - actual_result["alternative"], - key=lambda alternative: alternative["confidence"], - ) + best_hypothesis = max(actual_result["alternative"], key=lambda alternative: alternative["confidence"]) else: - # when there is no confidence available, - # we arbitrarily choose the first hypothesis. + # when there is no confidence available, we arbitrarily choose the first hypothesis. best_hypothesis = actual_result["alternative"][0] - if "transcript" not in best_hypothesis: - raise UnknownValueError() + if "transcript" not in best_hypothesis: raise UnknownValueError() # https://cloud.google.com/speech-to-text/docs/basics#confidence-values - # "Your code should not require the confidence field as it is not guaranteed - # to be accurate, or even set, in any of the results." + # "Your code should not require the confidence field as it is not guaranteed to be accurate, or even set, in any of the results." 
confidence = best_hypothesis.get("confidence", 0.5) if with_confidence: return best_hypothesis["transcript"], confidence return best_hypothesis["transcript"] - def recognize_google_cloud( - self, - audio_data, - credentials_json=None, - language="en-US", - preferred_phrases=None, - show_all=False, - ): + def recognize_google_cloud(self, audio_data, credentials_json=None, language="en-US", preferred_phrases=None, show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using the Google Cloud Speech API. - - This function requires a Google Cloud Platform account; see the - `Google Cloud Speech API Quickstart - `__ - for details and instructions. Basically, create a project, enable billing for - the project, enable the Google Cloud Speech API for the project, and set up - Service Account Key credentials for the project. The result is a JSON file - containing the API credentials. The text content of this JSON file is specified - by ``credentials_json``. If not specified, the library will try to - automatically `find the default API credentials JSON file - `__. - - The recognition language is determined by ``language``, which is a BCP-47 - language tag like ``"en-US"`` (US English). A list of supported language tags - can be found in the `Google Cloud Speech API documentation `__. - - If ``preferred_phrases`` is an iterable of phrase strings, those given phrases - will be more likely to be recognized over similar-sounding alternatives. - This is useful for things like keyword/command recognition or adding new - phrases that aren't in Google's vocabulary. Note that the API imposes certain - `restrictions on the list of phrase strings - `__. - - Returns the most likely transcription if ``show_all`` is False (the default). - Otherwise, returns the raw API response as a JSON dictionary. - - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is - unintelligible. Raises a ``speech_recognition.RequestError`` exception if the - speech recognition operation failed, if the credentials aren't valid, or if - there is no Internet connection. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Cloud Speech API. + + This function requires a Google Cloud Platform account; see the `Google Cloud Speech API Quickstart `__ for details and instructions. Basically, create a project, enable billing for the project, enable the Google Cloud Speech API for the project, and set up Service Account Key credentials for the project. The result is a JSON file containing the API credentials. The text content of this JSON file is specified by ``credentials_json``. If not specified, the library will try to automatically `find the default API credentials JSON file `__. + + The recognition language is determined by ``language``, which is a BCP-47 language tag like ``"en-US"`` (US English). A list of supported language tags can be found in the `Google Cloud Speech API documentation `__. + + If ``preferred_phrases`` is an iterable of phrase strings, those given phrases will be more likely to be recognized over similar-sounding alternatives. This is useful for things like keyword/command recognition or adding new phrases that aren't in Google's vocabulary. Note that the API imposes certain `restrictions on the list of phrase strings `__. + + Returns the most likely transcription if ``show_all`` is False (the default). Otherwise, returns the raw API response as a JSON dictionary. 
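A hedged sketch of the Cloud Speech path described above (illustrative, not part of the patch); it assumes google-cloud-speech is installed, GOOGLE_APPLICATION_CREDENTIALS points at a valid service-account JSON file, and the preferred_phrases values are placeholders.

import custom_speech_recognition as sr

r = sr.Recognizer()
with sr.AudioFile("example.wav") as source:  # hypothetical sample file
    audio = r.record(source)

try:
    print(r.recognize_google_cloud(audio, language="en-US",
                                   preferred_phrases=["transcribe", "speaker"]))  # placeholder phrases
except sr.UnknownValueError:
    print("Google Cloud Speech could not understand the audio")
except sr.RequestError as e:
    print(f"Could not request results from Google Cloud Speech; {e}")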
+ + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the credentials aren't valid, or if there is no Internet connection. """ - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data" if credentials_json is None: - assert os.environ.get("GOOGLE_APPLICATION_CREDENTIALS") is not None - assert isinstance(language, str), LANGUAGE_MUST_BE_STRING - assert preferred_phrases is None or all( - isinstance(preferred_phrases, str) - for preferred_phrases in preferred_phrases - ), "``preferred_phrases`` must be a list of strings" + assert os.environ.get('GOOGLE_APPLICATION_CREDENTIALS') is not None + assert isinstance(language, str), "``language`` must be a string" + assert preferred_phrases is None or all(isinstance(preferred_phrases, (type(""), type(u""))) for preferred_phrases in preferred_phrases), "``preferred_phrases`` must be a list of strings" try: import socket - - from google.api_core.exceptions import GoogleAPICallError from google.cloud import speech + from google.api_core.exceptions import GoogleAPICallError except ImportError: - raise RequestError( - "missing google-cloud-speech module: ensure that " - "google-cloud-speech is set up correctly." - ) + raise RequestError('missing google-cloud-speech module: ensure that google-cloud-speech is set up correctly.') if credentials_json is not None: client = speech.SpeechClient.from_service_account_json(credentials_json) @@ -1298,30 +790,26 @@ def recognize_google_cloud( client = speech.SpeechClient() flac_data = audio_data.get_flac_data( - convert_rate=None - # audio sample rate must be between - # 8 kHz and 48 kHz inclusive - clamp sample rate into this range - if 8000 <= audio_data.sample_rate <= 48000 - else max(8000, min(audio_data.sample_rate, 48000)), - convert_width=2, # audio samples must be 16-bit + convert_rate=None if 8000 <= audio_data.sample_rate <= 48000 else max(8000, min(audio_data.sample_rate, 48000)), # audio sample rate must be between 8 kHz and 48 kHz inclusive - clamp sample rate into this range + convert_width=2 # audio samples must be 16-bit ) audio = speech.RecognitionAudio(content=flac_data) config = { - "encoding": speech.RecognitionConfig.AudioEncoding.FLAC, - "sample_rate_hertz": audio_data.sample_rate, - "language_code": language, + 'encoding': speech.RecognitionConfig.AudioEncoding.FLAC, + 'sample_rate_hertz': audio_data.sample_rate, + 'language_code': language } if preferred_phrases is not None: - config["speechContexts"] = [speech.SpeechContext(phrases=preferred_phrases)] + config['speechContexts'] = [speech.SpeechContext( + phrases=preferred_phrases + )] if show_all: - config[ - "enableWordTimeOffsets" - ] = True # some useful extra options for when we want all the output + config['enableWordTimeOffsets'] = True # some useful extra options for when we want all the output opts = {} if self.operation_timeout and socket.getdefaulttimeout() is None: - opts["timeout"] = self.operation_timeout + opts['timeout'] = self.operation_timeout config = speech.RecognitionConfig(**config) @@ -1330,468 +818,292 @@ def recognize_google_cloud( except GoogleAPICallError as e: raise RequestError(e) except URLError as e: - raise RequestError(f"recognition connection failed: {e.reason}") + raise RequestError("recognition connection failed: {0}".format(e.reason)) - if show_all: - return response - 
if len(response.results) == 0: - raise UnknownValueError() + if show_all: return response + if len(response.results) == 0: raise UnknownValueError() - transcript = "" + transcript = '' for result in response.results: - transcript += result.alternatives[0].transcript.strip() + " " + transcript += result.alternatives[0].transcript.strip() + ' ' return transcript def recognize_wit(self, audio_data, key, show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using the Wit.ai API. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API. - The Wit.ai API key is specified by ``key``. Unfortunately, these are not - available without `signing up for an account `__ and creating - an app. You will need to add at least one intent to the app before you can see - the API key, though the actual intent settings don't matter. + The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter. - To get the API key for a Wit.ai app, go to the app's overview page, go to the - section titled "Make an API request", and look for something along the lines of - ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; - ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` - is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings. + To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings. The recognition language is configured in the Wit.ai app settings. - Returns the most likely transcription if ``show_all`` is false (the default). - Otherwise, returns the `raw API response - `__ as a JSON - dictionary. + Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is - unintelligible. Raises a ``speech_recognition.RequestError`` exception if the - speech recognition operation failed, if the key isn't valid, or if there is no - internet connection. + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 
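Illustrative only (not part of the diff): calling the Wit.ai backend documented above with a placeholder 32-character key.

import custom_speech_recognition as sr

WIT_AI_KEY = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"  # placeholder server access token

r = sr.Recognizer()
with sr.AudioFile("example.wav") as source:  # hypothetical sample file
    audio = r.record(source)

try:
    print(r.recognize_wit(audio, key=WIT_AI_KEY))
except sr.UnknownValueError:
    print("Wit.ai could not understand the audio")
except sr.RequestError as e:
    print(f"Could not request results from Wit.ai; {e}")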
""" - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA - assert isinstance(key, str), KEY_MUST_BE_STRING + assert isinstance(audio_data, AudioData), "Data must be audio data" + assert isinstance(key, str), "``key`` must be a string" wav_data = audio_data.get_wav_data( - convert_rate=None - if audio_data.sample_rate >= 8000 - else 8000, # audio samples must be at least 8 kHz - convert_width=2, # audio samples should be 16-bit + convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz + convert_width=2 # audio samples should be 16-bit ) url = "https://api.wit.ai/speech?v=20170307" - request = Request( - url, - data=wav_data, - headers={ - "Authorization": f"Bearer {key}", - "Content-Type": "audio/wav", - }, - ) + request = Request(url, data=wav_data, headers={"Authorization": "Bearer {}".format(key), "Content-Type": "audio/wav"}) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError(f"recognition request failed: {e.reason}") + raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: - raise RequestError(f"recognition connection failed: {e.reason}") + raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) - if show_all: - return result - if "_text" not in result or result["_text"] is None: - raise UnknownValueError() + # return results + if show_all: return result + if "_text" not in result or result["_text"] is None: raise UnknownValueError() return result["_text"] - def recognize_azure( - self, - audio_data, - key, - language="en-US", - profanity="masked", - location="westus", - show_all=False, - ): + def recognize_azure(self, audio_data, key, language="en-US", profanity="masked", location="westus", show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using the Microsoft Azure Speech API. - - The Microsoft Azure Speech API key is specified by ``key``. Unfortunately, - these are not available without `signing up for an account - `__ - with Microsoft Azure. - - To get the API key, go to the `Microsoft Azure Portal Resources - `__ page, go to "All Resources" > "Add" > "See All" - > Search "Speech > "Create", and fill in the form to make a "Speech" resource. - On the resulting page (which is also accessible from the "All Resources" page - in the Azure Portal), go to the "Show Access Keys" page, which will have two - API keys, either of which can be used for the `key` parameter. Microsoft Azure - Speech API keys are 32-character lowercase hexadecimal strings. - - The recognition language is determined by ``language``, a BCP-47 language tag - like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting - to US English. A list of supported language values can be found in the - `API documentation `__ - under "Interactive and dictation mode". - - Returns the most likely transcription if ``show_all`` is false (the default). - Otherwise, returns the `raw API response - `__ - as a JSON dictionary. - - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is - unintelligible. Raises a ``speech_recognition.RequestError`` exception if the - speech recognition operation failed, if the key isn't valid, or if there is no - internet connection. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Azure Speech API. 
+ + The Microsoft Azure Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ with Microsoft Azure. + + To get the API key, go to the `Microsoft Azure Portal Resources `__ page, go to "All Resources" > "Add" > "See All" > Search "Speech > "Create", and fill in the form to make a "Speech" resource. On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Azure Speech API keys are 32-character lowercase hexadecimal strings. + + The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation `__ under "Interactive and dictation mode". + + Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. """ - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA - assert isinstance(key, str), KEY_MUST_BE_STRING - # simple|detailed - # assert isinstance(result_format, str), "``format`` must be a string" - assert isinstance(language, str), LANGUAGE_MUST_BE_STRING - - result_format = "detailed" - access_token, expire_time = getattr( - self, "azure_cached_access_token", None - ), getattr(self, "azure_cached_access_token_expiry", None) + assert isinstance(audio_data, AudioData), "Data must be audio data" + assert isinstance(key, str), "``key`` must be a string" + # assert isinstance(result_format, str), "``format`` must be a string" # simple|detailed + assert isinstance(language, str), "``language`` must be a string" + + result_format = 'detailed' + access_token, expire_time = getattr(self, "azure_cached_access_token", None), getattr(self, "azure_cached_access_token_expiry", None) allow_caching = True try: - # we need monotonic time to avoid being affected by system clock changes, - # but this is only available in Python 3.3+ - from time import ( - monotonic, - ) + from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+ except ImportError: - # monotonic time not available, don't cache access tokens - expire_time = None - # don't allow caching, since monotonic time isn't available - allow_caching = False - - # caching not enabled, first credential request, - # or the access token from the previous one expired - if expire_time is None or monotonic() > expire_time: + expire_time = None # monotonic time not available, don't cache access tokens + allow_caching = False # don't allow caching, since monotonic time isn't available + if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired # get an access token using OAuth - credential_url = ( - "https://" - + location - + ".api.cognitive.microsoft.com/sts/v1.0/issueToken" - ) - credential_request = Request( - credential_url, - data=b"", - headers={ - "Content-type": "application/x-www-form-urlencoded", - 
"Content-Length": "0", - "Ocp-Apim-Subscription-Key": key, - }, - ) + credential_url = "https://" + location + ".api.cognitive.microsoft.com/sts/v1.0/issueToken" + credential_request = Request(credential_url, data=b"", headers={ + "Content-type": "application/x-www-form-urlencoded", + "Content-Length": "0", + "Ocp-Apim-Subscription-Key": key, + }) if allow_caching: start_time = monotonic() try: - # credential response can take longer, - # use longer timeout instead of default one - credential_response = urlopen(credential_request, timeout=60) + credential_response = urlopen(credential_request, timeout=60) # credential response can take longer, use longer timeout instead of default one except HTTPError as e: - raise RequestError(f"credential request failed: {e.reason}") + raise RequestError("credential request failed: {}".format(e.reason)) except URLError as e: - raise RequestError(f"credential connection failed: {e.reason}") + raise RequestError("credential connection failed: {}".format(e.reason)) access_token = credential_response.read().decode("utf-8") if allow_caching: # save the token for the duration it is valid for self.azure_cached_access_token = access_token - # according to https://docs.microsoft.com/en-us/azure/cognitive-services/Speech-Service/rest-apis#authentication, - # the token expires in exactly 10 minutes - self.azure_cached_access_token_expiry = start_time + 600 + self.azure_cached_access_token_expiry = start_time + 600 # according to https://docs.microsoft.com/en-us/azure/cognitive-services/Speech-Service/rest-apis#authentication, the token expires in exactly 10 minutes wav_data = audio_data.get_wav_data( convert_rate=16000, # audio samples must be 8kHz or 16 kHz - convert_width=2, # audio samples should be 16-bit + convert_width=2 # audio samples should be 16-bit ) - url = ( - "https://" - + location - + ( - ".stt.speech.microsoft.com/speech/recognition/conversation/" - "cognitiveservices/v1?{}" - ).format( - urlencode( - { - "language": language, - "format": result_format, - "profanity": profanity, - } - ) - ) - ) + url = "https://" + location + ".stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?{}".format(urlencode({ + "language": language, + "format": result_format, + "profanity": profanity + })) - # chunked-transfer requests are only supported in the standard library - # as of Python 3.6+, use it if possible - request = Request( - url, - data=io.BytesIO(wav_data), - headers={ - "Authorization": f"Bearer {access_token}", - "Content-type": 'audio/wav; codec="audio/pcm"; samplerate=16000', + if sys.version_info >= (3, 6): # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible + request = Request(url, data=io.BytesIO(wav_data), headers={ + "Authorization": "Bearer {}".format(access_token), + "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", "Transfer-Encoding": "chunked", - }, - ) + }) + else: # fall back on manually formatting the POST body as a chunked request + ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8") + chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n" + request = Request(url, data=chunked_transfer_encoding_data, headers={ + "Authorization": "Bearer {}".format(access_token), + "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", + "Transfer-Encoding": "chunked", + }) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise 
RequestError(f"recognition request failed: {e.reason}") + raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: - raise RequestError(f"recognition connection failed: {e.reason}") + raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) + # return results if show_all: return result - if ( - "RecognitionStatus" not in result - or result["RecognitionStatus"] != "Success" - or "NBest" not in result - ): + if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "NBest" not in result: raise UnknownValueError() - return result["NBest"][0]["Display"], result["NBest"][0]["Confidence"] + return result['NBest'][0]["Display"], result['NBest'][0]["Confidence"] def recognize_bing(self, audio_data, key, language="en-US", show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using the Microsoft Bing Speech API. - - The Microsoft Bing Speech API key is specified by ``key``. Unfortunately, - these are not available without `signing up for an account - `__ - with Microsoft Azure. - - To get the API key, go to the `Microsoft Azure Portal Resources - `__ page, go to "All Resources" > "Add" > "See All" - > Search "Bing Speech API > "Create", and fill in the form to make a - "Bing Speech API" resource. On the resulting page (which is also accessible - from the "All Resources" page in the Azure Portal), go to the - "Show Access Keys" page, which will have two API keys, either of which can be - used for the `key` parameter. Microsoft Bing Speech API keys are 32-character - lowercase hexadecimal strings. - - The recognition language is determined by ``language``, a BCP-47 language tag - like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting - to US English. A list of supported language values can be found in the `API - documentation `__ - under "Interactive and dictation mode". - - Returns the most likely transcription if ``show_all`` is false (the default). - Otherwise, returns the `raw API response `__ - as a JSON dictionary. - - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is - unintelligible. Raises a ``speech_recognition.RequestError`` exception if the - speech recognition operation failed, if the key isn't valid, or if there is no - internet connection. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Speech API. + + The Microsoft Bing Speech API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account `__ with Microsoft Azure. + + To get the API key, go to the `Microsoft Azure Portal Resources `__ page, go to "All Resources" > "Add" > "See All" > Search "Bing Speech API > "Create", and fill in the form to make a "Bing Speech API" resource. On the resulting page (which is also accessible from the "All Resources" page in the Azure Portal), go to the "Show Access Keys" page, which will have two API keys, either of which can be used for the `key` parameter. Microsoft Bing Speech API keys are 32-character lowercase hexadecimal strings. + + The recognition language is determined by ``language``, a BCP-47 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation `__ under "Interactive and dictation mode". 
+ + Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. """ - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA - assert isinstance(key, str), KEY_MUST_BE_STRING - assert isinstance(language, str), LANGUAGE_MUST_BE_STRING + assert isinstance(audio_data, AudioData), "Data must be audio data" + assert isinstance(key, str), "``key`` must be a string" + assert isinstance(language, str), "``language`` must be a string" - access_token, expire_time = getattr( - self, "bing_cached_access_token", None - ), getattr(self, "bing_cached_access_token_expiry", None) + access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None) allow_caching = True try: - # we need monotonic time to avoid being affected by system clock changes, - # but this is only available in Python 3.3+ - from time import ( - monotonic, - ) + from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+ except ImportError: - # monotonic time not available, don't cache access tokens - expire_time = None - # don't allow caching, since monotonic time isn't available - allow_caching = False - - # caching not enabled, first credential request, - # or the access token from the previous one expired - if expire_time is None or monotonic() > expire_time: + expire_time = None # monotonic time not available, don't cache access tokens + allow_caching = False # don't allow caching, since monotonic time isn't available + if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired # get an access token using OAuth credential_url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken" - credential_request = Request( - credential_url, - data=b"", - headers={ - "Content-type": "application/x-www-form-urlencoded", - "Content-Length": "0", - "Ocp-Apim-Subscription-Key": key, - }, - ) + credential_request = Request(credential_url, data=b"", headers={ + "Content-type": "application/x-www-form-urlencoded", + "Content-Length": "0", + "Ocp-Apim-Subscription-Key": key, + }) if allow_caching: start_time = monotonic() try: - # credential response can take longer, - # use longer timeout instead of default one - credential_response = urlopen(credential_request, timeout=60) + credential_response = urlopen(credential_request, timeout=60) # credential response can take longer, use longer timeout instead of default one except HTTPError as e: - raise RequestError(f"credential request failed: {e.reason}") + raise RequestError("credential request failed: {}".format(e.reason)) except URLError as e: - raise RequestError(f"credential connection failed: {e.reason}") + raise RequestError("credential connection failed: {}".format(e.reason)) access_token = credential_response.read().decode("utf-8") if allow_caching: # save the token for the duration it is valid for self.bing_cached_access_token = access_token - # according to https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition, - # the token expires in exactly 10 
minutes - self.bing_cached_access_token_expiry = start_time + 600 + self.bing_cached_access_token_expiry = start_time + 600 # according to https://docs.microsoft.com/en-us/azure/cognitive-services/speech/api-reference-rest/bingvoicerecognition, the token expires in exactly 10 minutes wav_data = audio_data.get_wav_data( convert_rate=16000, # audio samples must be 8kHz or 16 kHz - convert_width=2, # audio samples should be 16-bit + convert_width=2 # audio samples should be 16-bit ) - url = "https://speech.platform.bing.com/speech/recognition/interactive/cognitiveservices/v1?{}".format( - urlencode( - { - "language": language, - "locale": language, - "requestid": uuid.uuid4(), - } - ) - ) + url = "https://speech.platform.bing.com/speech/recognition/interactive/cognitiveservices/v1?{}".format(urlencode({ + "language": language, + "locale": language, + "requestid": uuid.uuid4(), + })) - # chunked-transfer requests are only supported in the standard library - # as of Python 3.6+, use it if possible - request = Request( - url, - data=io.BytesIO(wav_data), - headers={ - "Authorization": f"Bearer {access_token}", - "Content-type": 'audio/wav; codec="audio/pcm"; samplerate=16000', + if sys.version_info >= (3, 6): # chunked-transfer requests are only supported in the standard library as of Python 3.6+, use it if possible + request = Request(url, data=io.BytesIO(wav_data), headers={ + "Authorization": "Bearer {}".format(access_token), + "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", "Transfer-Encoding": "chunked", - }, - ) + }) + else: # fall back on manually formatting the POST body as a chunked request + ascii_hex_data_length = "{:X}".format(len(wav_data)).encode("utf-8") + chunked_transfer_encoding_data = ascii_hex_data_length + b"\r\n" + wav_data + b"\r\n0\r\n\r\n" + request = Request(url, data=chunked_transfer_encoding_data, headers={ + "Authorization": "Bearer {}".format(access_token), + "Content-type": "audio/wav; codec=\"audio/pcm\"; samplerate=16000", + "Transfer-Encoding": "chunked", + }) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError(f"recognition request failed: {e.reason}") + raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: - raise RequestError(f"recognition connection failed: {e.reason}") + raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) - if show_all: - return result - if ( - "RecognitionStatus" not in result - or result["RecognitionStatus"] != "Success" - or "DisplayText" not in result - ): - raise UnknownValueError() + # return results + if show_all: return result + if "RecognitionStatus" not in result or result["RecognitionStatus"] != "Success" or "DisplayText" not in result: raise UnknownValueError() return result["DisplayText"] - def recognize_lex( - self, - audio_data, - bot_name, - bot_alias, - user_id, - content_type="audio/l16; rate=16000; channels=1", - access_key_id=None, - secret_access_key=None, - region=None, - ): + def recognize_lex(self, audio_data, bot_name, bot_alias, user_id, content_type="audio/l16; rate=16000; channels=1", access_key_id=None, secret_access_key=None, region=None): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using the Amazon Lex API. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Amazon Lex API. 
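For context (illustrative, not part of the patch): recognize_azure and recognize_bing above both cache their OAuth access token against a monotonic clock so the 10-minute token is reused across calls. A standalone sketch of that pattern follows; fetch_token and TOKEN_TTL_SECONDS are hypothetical names.

from time import monotonic

TOKEN_TTL_SECONDS = 600  # Azure/Bing STS tokens expire after exactly 10 minutes

_cached_token = None
_cached_expiry = None

def get_access_token(fetch_token):
    """Return a cached access token, refreshing it via fetch_token() once expired."""
    global _cached_token, _cached_expiry
    if _cached_expiry is None or monotonic() > _cached_expiry:
        start = monotonic()            # monotonic time is immune to system clock changes
        _cached_token = fetch_token()  # e.g. a POST to the issueToken endpoint
        _cached_expiry = start + TOKEN_TTL_SECONDS
    return _cached_token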
- If access_key_id or secret_access_key is not set it will go through the list in - the link below + If access_key_id or secret_access_key is not set it will go through the list in the link below http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials """ - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert isinstance(audio_data, AudioData), "Data must be audio data" assert isinstance(bot_name, str), "``bot_name`` must be a string" assert isinstance(bot_alias, str), "``bot_alias`` must be a string" assert isinstance(user_id, str), "``user_id`` must be a string" assert isinstance(content_type, str), "``content_type`` must be a string" - assert access_key_id is None or isinstance( - access_key_id, str - ), "``access_key_id`` must be a string" - assert secret_access_key is None or isinstance( - secret_access_key, str - ), "``secret_access_key`` must be a string" + assert access_key_id is None or isinstance(access_key_id, str), "``access_key_id`` must be a string" + assert secret_access_key is None or isinstance(secret_access_key, str), "``secret_access_key`` must be a string" assert region is None or isinstance(region, str), "``region`` must be a string" try: import boto3 except ImportError: - raise RequestError( - "missing boto3 module: ensure that boto3 is set up correctly." - ) + raise RequestError("missing boto3 module: ensure that boto3 is set up correctly.") - client = boto3.client( - "lex-runtime", - aws_access_key_id=access_key_id, - aws_secret_access_key=secret_access_key, - region_name=region, - ) + client = boto3.client('lex-runtime', aws_access_key_id=access_key_id, + aws_secret_access_key=secret_access_key, + region_name=region) - raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) + raw_data = audio_data.get_raw_data( + convert_rate=16000, convert_width=2 + ) accept = "text/plain; charset=utf-8" - response = client.post_content( - botName=bot_name, - botAlias=bot_alias, - userId=user_id, - contentType=content_type, - accept=accept, - inputStream=raw_data, - ) + response = client.post_content(botName=bot_name, botAlias=bot_alias, userId=user_id, contentType=content_type, accept=accept, inputStream=raw_data) return response["inputTranscript"] def recognize_houndify(self, audio_data, client_id, client_key, show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using the Houndify API. - - The Houndify client ID and client key are specified by ``client_id`` and - ``client_key``, respectively. Unfortunately, these are not available without - `signing up for an account `__. Once logged - into the `dashboard `__, you will want to - select "Register a new client", and fill in the form as necessary. When at the - "Enable Domains" page, enable the "Speech To Text Only" domain, and then select - "Save & Continue". - - To get the client ID and client key for a Houndify client, go to the `dashboard - `__ and select the client's "View Details" - link. On the resulting page, the client ID and client key will be visible. - Client IDs and client keys are both Base64-encoded strings. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Houndify API. + + The Houndify client ID and client key are specified by ``client_id`` and ``client_key``, respectively. Unfortunately, these are not available without `signing up for an account `__. 
Once logged into the `dashboard `__, you will want to select "Register a new client", and fill in the form as necessary. When at the "Enable Domains" page, enable the "Speech To Text Only" domain, and then select "Save & Continue". + + To get the client ID and client key for a Houndify client, go to the `dashboard `__ and select the client's "View Details" link. On the resulting page, the client ID and client key will be visible. Client IDs and client keys are both Base64-encoded strings. Currently, only English is supported as a recognition language. - Returns the most likely transcription if ``show_all`` is false (the default). - Otherwise, returns the raw API response as a JSON dictionary. + Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary. - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is - unintelligible. Raises a ``speech_recognition.RequestError`` exception if the - speech recognition operation failed, if the key isn't valid, or if there is no - internet connection. + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. """ - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert isinstance(audio_data, AudioData), "Data must be audio data" assert isinstance(client_id, str), "``client_id`` must be a string" assert isinstance(client_key, str), "``client_key`` must be a string" wav_data = audio_data.get_wav_data( - convert_rate=None - if audio_data.sample_rate in [8000, 16000] - else 16000, # audio samples must be 8 kHz or 16 kHz - convert_width=2, # audio samples should be 16-bit + convert_rate=None if audio_data.sample_rate in [8000, 16000] else 16000, # audio samples must be 8 kHz or 16 kHz + convert_width=2 # audio samples should be 16-bit ) url = "https://api.houndify.com/v1/audio" user_id, request_id = str(uuid.uuid4()), str(uuid.uuid4()) @@ -1799,142 +1111,105 @@ def recognize_houndify(self, audio_data, client_id, client_key, show_all=False): request_signature = base64.urlsafe_b64encode( hmac.new( base64.urlsafe_b64decode(client_key), - user_id.encode("utf-8") - + b";" - + request_id.encode("utf-8") - + request_time.encode("utf-8"), - hashlib.sha256, + user_id.encode("utf-8") + b";" + request_id.encode("utf-8") + request_time.encode("utf-8"), + hashlib.sha256 ).digest() # get the HMAC digest as bytes ).decode("utf-8") - request = Request( - url, - data=wav_data, - headers={ - "Content-Type": "application/json", - "Hound-Request-Info": json.dumps( - {"ClientID": client_id, "UserID": user_id} - ), - "Hound-Request-Authentication": f"{user_id};{request_id}", - "Hound-Client-Authentication": "{};{};{}".format( - client_id, request_time, request_signature - ), - }, - ) + request = Request(url, data=wav_data, headers={ + "Content-Type": "application/json", + "Hound-Request-Info": json.dumps({"ClientID": client_id, "UserID": user_id}), + "Hound-Request-Authentication": "{};{}".format(user_id, request_id), + "Hound-Client-Authentication": "{};{};{}".format(client_id, request_time, request_signature) + }) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError(f"recognition request failed: {e.reason}") + raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: 
- raise RequestError(f"recognition connection failed: {e.reason}") + raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) - if show_all: - return result + # return results + if show_all: return result if "Disambiguation" not in result or result["Disambiguation"] is None: raise UnknownValueError() - return ( - result["Disambiguation"]["ChoiceData"][0]["Transcription"], - result["Disambiguation"]["ChoiceData"][0]["ConfidenceScore"], - ) + return result['Disambiguation']['ChoiceData'][0]['Transcription'], result['Disambiguation']['ChoiceData'][0]['ConfidenceScore'] - def recognize_amazon( - self, - audio_data, - bucket_name=None, - access_key_id=None, - secret_access_key=None, - region=None, - job_name=None, - file_key=None, - ): + def recognize_amazon(self, audio_data, bucket_name=None, access_key_id=None, secret_access_key=None, region=None, job_name=None, file_key=None): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance) using - Amazon Transcribe. https://aws.amazon.com/transcribe/ - If access_key_id or secret_access_key is not set it will go through the list in - the link: http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance) using Amazon Transcribe. + https://aws.amazon.com/transcribe/ + If access_key_id or secret_access_key is not set it will go through the list in the link below + http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials """ - assert access_key_id is None or isinstance( - access_key_id, str - ), "``access_key_id`` must be a string" - assert secret_access_key is None or isinstance( - secret_access_key, str - ), "``secret_access_key`` must be a string" + assert access_key_id is None or isinstance(access_key_id, str), "``access_key_id`` must be a string" + assert secret_access_key is None or isinstance(secret_access_key, str), "``secret_access_key`` must be a string" assert region is None or isinstance(region, str), "``region`` must be a string" - import multiprocessing import traceback import uuid - + import multiprocessing from botocore.exceptions import ClientError - proc = multiprocessing.current_process() check_existing = audio_data is None and job_name - bucket_name = bucket_name or (f"{str(uuid.uuid4())}-{proc.pid}") - job_name = job_name or (f"{str(uuid.uuid4())}-{proc.pid}") + bucket_name = bucket_name or ('%s-%s' % (str(uuid.uuid4()), proc.pid)) + job_name = job_name or ('%s-%s' % (str(uuid.uuid4()), proc.pid)) try: import boto3 except ImportError: - raise RequestError( - "missing boto3 module: ensure that boto3 is set up correctly." - ) + raise RequestError("missing boto3 module: ensure that boto3 is set up correctly.") transcribe = boto3.client( - "transcribe", + 'transcribe', aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, - region_name=region, - ) + region_name=region) - s3 = boto3.client( - "s3", + s3 = boto3.client('s3', aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, - region_name=region, - ) + region_name=region) session = boto3.Session( aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, - region_name=region, + region_name=region ) # Upload audio data to S3. - filename = "%s.wav" % job_name + filename = '%s.wav' % job_name try: # Bucket creation fails surprisingly often, even if the bucket exists. 
# print('Attempting to create bucket %s...' % bucket_name) s3.create_bucket(Bucket=bucket_name) except ClientError as exc: - print(f"Error creating bucket {bucket_name}: {exc}") - s3res = session.resource("s3") - s3res.Bucket(bucket_name) + print('Error creating bucket %s: %s' % (bucket_name, exc)) + s3res = session.resource('s3') + bucket = s3res.Bucket(bucket_name) if audio_data is not None: - print("Uploading audio data...") + print('Uploading audio data...') wav_data = audio_data.get_wav_data() s3.put_object(Bucket=bucket_name, Key=filename, Body=wav_data) object_acl = s3res.ObjectAcl(bucket_name, filename) - object_acl.put(ACL="public-read") + object_acl.put(ACL='public-read') else: - print("Skipping audio upload.") - job_uri = f"https://{bucket_name}.s3.amazonaws.com/{filename}" + print('Skipping audio upload.') + job_uri = 'https://%s.s3.amazonaws.com/%s' % (bucket_name, filename) if check_existing: + # Wait for job to complete. try: status = transcribe.get_transcription_job(TranscriptionJobName=job_name) except ClientError as exc: - print("!" * 80) - print("Error getting job:", exc.response) - if exc.response["Error"]["Code"] == ("BadRequestException") and ( - "The requested job couldn't be found" - ) in str(exc): + print('!'*80) + print('Error getting job:', exc.response) + if exc.response['Error']['Code'] == 'BadRequestException' and "The requested job couldn't be found" in str(exc): # Some error caused the job we recorded to not exist on AWS. - # Likely we were interrupted right after retrieving and deleting - # the job but before recording the transcript. - + # Likely we were interrupted right after retrieving and deleting the job but before recording the transcript. # Reset and try again later. exc = TranscriptionNotReady() exc.job_name = None @@ -1943,88 +1218,81 @@ def recognize_amazon( else: # Some other error happened, so re-raise. raise + + job = status['TranscriptionJob'] + if job['TranscriptionJobStatus'] in ['COMPLETED'] and 'TranscriptFileUri' in job['Transcript']: - job = status["TranscriptionJob"] - if ( - job["TranscriptionJobStatus"] in ["COMPLETED"] - and "TranscriptFileUri" in job["Transcript"] - ): # Retrieve transcription JSON containing transcript. - transcript_uri = job["Transcript"]["TranscriptFileUri"] - import json - import urllib.request - + transcript_uri = job['Transcript']['TranscriptFileUri'] + import urllib.request, json with urllib.request.urlopen(transcript_uri) as json_data: d = json.load(json_data) confidences = [] - for item in d["results"]["items"]: - confidences.append(float(item["alternatives"][0]["confidence"])) + for item in d['results']['items']: + confidences.append(float(item['alternatives'][0]['confidence'])) confidence = 0.5 if confidences: - confidence = sum(confidences) / float(len(confidences)) - transcript = d["results"]["transcripts"][0]["transcript"] + confidence = sum(confidences)/float(len(confidences)) + transcript = d['results']['transcripts'][0]['transcript'] # Delete job. try: - # cleanup - transcribe.delete_transcription_job( - TranscriptionJobName=job_name - ) + transcribe.delete_transcription_job(TranscriptionJobName=job_name) # cleanup except Exception as exc: - print("Warning, could not clean up transcription: %s" % exc) + print('Warning, could not clean up transcription: %s' % exc) traceback.print_exc() # Delete S3 file. 
s3.delete_object(Bucket=bucket_name, Key=filename) return transcript, confidence - elif job["TranscriptionJobStatus"] in ["FAILED"]: + elif job['TranscriptionJobStatus'] in ['FAILED']: + # Delete job. try: - # cleanup - transcribe.delete_transcription_job(TranscriptionJobName=job_name) + transcribe.delete_transcription_job(TranscriptionJobName=job_name) # cleanup except Exception as exc: - print("Warning, could not clean up transcription: %s" % exc) + print('Warning, could not clean up transcription: %s' % exc) traceback.print_exc() # Delete S3 file. s3.delete_object(Bucket=bucket_name, Key=filename) - + exc = TranscriptionFailed() exc.job_name = None exc.file_key = None raise exc else: # Keep waiting. - print("Keep waiting.") + print('Keep waiting.') exc = TranscriptionNotReady() exc.job_name = job_name exc.file_key = None raise exc else: + # Launch the transcription job. # try: - # # pre-cleanup - # transcribe.delete_transcription_job(TranscriptionJobName=job_name) + # transcribe.delete_transcription_job(TranscriptionJobName=job_name) # pre-cleanup # except: - # # It's ok if this fails because the job hopefully doesn't exist yet. - # pass + # # It's ok if this fails because the job hopefully doesn't exist yet. + # pass try: transcribe.start_transcription_job( TranscriptionJobName=job_name, - Media={"MediaFileUri": job_uri}, - MediaFormat="wav", - LanguageCode="en-US", + Media={'MediaFileUri': job_uri}, + MediaFormat='wav', + LanguageCode='en-US' ) exc = TranscriptionNotReady() exc.job_name = job_name exc.file_key = None raise exc except ClientError as exc: - print("!" * 80) - print("Error starting job:", exc.response) - if exc.response["Error"]["Code"] == "LimitExceededException": + print('!'*80) + print('Error starting job:', exc.response) + if exc.response['Error']['Code'] == 'LimitExceededException': # Could not start job. Cancel everything. s3.delete_object(Bucket=bucket_name, Key=filename) exc = TranscriptionNotReady() @@ -2042,7 +1310,7 @@ def recognize_assemblyai(self, audio_data, api_token, job_name=None, **kwargs): """ def read_file(filename, chunk_size=5242880): - with open(filename, "rb") as _file: + with open(filename, 'rb') as _file: while True: data = _file.read(chunk_size) if not data: @@ -2059,43 +1327,46 @@ def read_file(filename, chunk_size=5242880): } response = requests.get(endpoint, headers=headers) data = response.json() - status = data["status"] + status = data['status'] - if status == "error": + if status == 'error': # Handle error. exc = TranscriptionFailed() exc.job_name = None exc.file_key = None raise exc # Handle success. - elif status == "completed": - confidence = data["confidence"] - text = data["text"] + elif status == 'completed': + confidence = data['confidence'] + text = data['text'] return text, confidence # Otherwise keep waiting. - print("Keep waiting.") + print('Keep waiting.') exc = TranscriptionNotReady() exc.job_name = job_name exc.file_key = None raise exc else: # Upload file. - headers = {"authorization": api_token} - response = requests.post( - "https://api.assemblyai.com/v2/upload", - headers=headers, - data=read_file(audio_data), - ) - upload_url = response.json()["upload_url"] + headers = {'authorization': api_token} + response = requests.post('https://api.assemblyai.com/v2/upload', + headers=headers, + data=read_file(audio_data)) + upload_url = response.json()['upload_url'] # Queue file for transcription. 
endpoint = "https://api.assemblyai.com/v2/transcript" - json = {"audio_url": upload_url} - headers = {"authorization": api_token, "content-type": "application/json"} + json = { + "audio_url": upload_url + } + headers = { + "authorization": api_token, + "content-type": "application/json" + } response = requests.post(endpoint, json=json, headers=headers) data = response.json() - transciption_id = data["id"] + transciption_id = data['id'] exc = TranscriptionNotReady() exc.job_name = transciption_id exc.file_key = None @@ -2103,83 +1374,51 @@ def read_file(filename, chunk_size=5242880): def recognize_ibm(self, audio_data, key, language="en-US", show_all=False): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using the IBM Speech to Text API. - - The IBM Speech to Text username and password are specified by ``username`` and - ``password``, respectively. Unfortunately, these are not available without - `signing up for an account `__. - Once logged into the Bluemix console, follow the instructions for `creating an - IBM Watson service instance `__, - where the Watson service is "Speech To Text". IBM Speech to Text usernames are - strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are - mixed-case alphanumeric strings. - - The recognition language is determined by ``language``, an RFC5646 language tag - with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), - defaulting to US English. The supported language values are listed under the - ``model`` parameter of the `audio recognition API documentation `__, - in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is - the language value. - - Returns the most likely transcription if ``show_all`` is false (the default). - Otherwise, returns the `raw API response `__ - as a JSON dictionary. - - Raises a ``speech_recognition.UnknownValueError`` exception if the speech is - unintelligible. Raises a ``speech_recognition.RequestError`` exception if the - speech recognition operation failed, if the key isn't valid, or if there is no - internet connection. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API. + + The IBM Speech to Text username and password are specified by ``username`` and ``password``, respectively. Unfortunately, these are not available without `signing up for an account `__. Once logged into the Bluemix console, follow the instructions for `creating an IBM Watson service instance `__, where the Watson service is "Speech To Text". IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are mixed-case alphanumeric strings. + + The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation `__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value. + + Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response `__ as a JSON dictionary. + + Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection. 
""" - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA - assert isinstance(key, str), KEY_MUST_BE_STRING + assert isinstance(audio_data, AudioData), "Data must be audio data" + assert isinstance(key, str), "``key`` must be a string" flac_data = audio_data.get_flac_data( - convert_rate=None - if audio_data.sample_rate >= 16000 - else 16000, # audio samples should be at least 16 kHz - convert_width=None - if audio_data.sample_width >= 2 - else 2, # audio samples should be at least 16-bit + convert_rate=None if audio_data.sample_rate >= 16000 else 16000, # audio samples should be at least 16 kHz + convert_width=None if audio_data.sample_width >= 2 else 2 # audio samples should be at least 16-bit ) url = "https://gateway-wdc.watsonplatform.net/speech-to-text/api/v1/recognize" - request = Request( - url, - data=flac_data, - headers={ - "Content-Type": "audio/x-flac", - }, - ) - request.get_method = lambda: "POST" - username = "apikey" + request = Request(url, data=flac_data, headers={ + "Content-Type": "audio/x-flac", + }) + request.get_method = lambda: 'POST' + username = 'apikey' password = key - authorization_value = base64.standard_b64encode( - f"{username}:{password}".encode() - ).decode("utf-8") - request.add_header("Authorization", f"Basic {authorization_value}") + authorization_value = base64.standard_b64encode("{}:{}".format(username, password).encode("utf-8")).decode("utf-8") + request.add_header("Authorization", "Basic {}".format(authorization_value)) try: response = urlopen(request, timeout=self.operation_timeout) except HTTPError as e: - raise RequestError(f"recognition request failed: {e.reason}") + raise RequestError("recognition request failed: {}".format(e.reason)) except URLError as e: - raise RequestError(f"recognition connection failed: {e.reason}") + raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) + # return results if show_all: return result - if ( - "results" not in result - or len(result["results"]) < 1 - or "alternatives" not in result["results"][0] - ): + if "results" not in result or len(result["results"]) < 1 or "alternatives" not in result["results"][0]: raise UnknownValueError() transcription = [] confidence = None for utterance in result["results"]: - if "alternatives" not in utterance: - raise UnknownValueError() + if "alternatives" not in utterance: raise UnknownValueError() for hypothesis in utterance["alternatives"]: if "transcript" in hypothesis: transcription.append(hypothesis["transcript"]) @@ -2187,15 +1426,10 @@ def recognize_ibm(self, audio_data, key, language="en-US", show_all=False): break return "\n".join(transcription), confidence - lasttfgraph = "" + lasttfgraph = '' tflabels = None - def recognize_tensorflow( - self, - audio_data, - tensor_graph="tensorflow-data/conv_actions_frozen.pb", - tensor_label="tensorflow-data/conv_actions_labels.txt", - ): + def recognize_tensorflow(self, audio_data, tensor_graph='tensorflow-data/conv_actions_frozen.pb', tensor_label='tensorflow-data/conv_actions_labels.txt'): """ Performs speech recognition on ``audio_data`` (an ``AudioData`` instance). @@ -2203,35 +1437,35 @@ def recognize_tensorflow( Path to Tensor Labels file loaded from ``tensor_label``. 
""" - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert isinstance(audio_data, AudioData), "Data must be audio data" assert isinstance(tensor_graph, str), "``tensor_graph`` must be a string" assert isinstance(tensor_label, str), "``tensor_label`` must be a string" try: import tensorflow as tf except ImportError: - raise RequestError( - "missing tensorflow module: ensure that tensorflow is set up correctly." - ) + raise RequestError("missing tensorflow module: ensure that tensorflow is set up correctly.") if not (tensor_graph == self.lasttfgraph): self.lasttfgraph = tensor_graph # load graph - with tf.gfile.FastGFile(tensor_graph, "rb") as f: + with tf.gfile.FastGFile(tensor_graph, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) - tf.import_graph_def(graph_def, name="") + tf.import_graph_def(graph_def, name='') # load labels self.tflabels = [line.rstrip() for line in tf.gfile.GFile(tensor_label)] - wav_data = audio_data.get_wav_data(convert_rate=16000, convert_width=2) + wav_data = audio_data.get_wav_data( + convert_rate=16000, convert_width=2 + ) with tf.Session() as sess: - input_layer_name = "wav_data:0" - output_layer_name = "labels_softmax:0" + input_layer_name = 'wav_data:0' + output_layer_name = 'labels_softmax:0' softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name) - (predictions,) = sess.run(softmax_tensor, {input_layer_name: wav_data}) + predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data}) # Sort labels in order of confidence top_k = predictions.argsort()[-1:][::-1] @@ -2239,46 +1473,28 @@ def recognize_tensorflow( human_string = self.tflabels[node_id] return human_string - def recognize_whisper( - self, - audio_data, - model="base", - show_dict=False, - load_options=None, - language=None, - translate=False, - **transcribe_options, - ): + def recognize_whisper(self, audio_data, model="base", show_dict=False, load_options=None, language=None, translate=False, **transcribe_options): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using Whisper. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using Whisper. - The recognition language is determined by ``language``, an uncapitalized full - language name like "english" or "chinese". See the full language list at https://github.com/openai/whisper/blob/main/whisper/tokenizer.py + The recognition language is determined by ``language``, an uncapitalized full language name like "english" or "chinese". See the full language list at https://github.com/openai/whisper/blob/main/whisper/tokenizer.py - model can be any of tiny, base, small, medium, large, tiny.en, base.en, - small.en, medium.en. See https://github.com/openai/whisper for more details. + model can be any of tiny, base, small, medium, large, tiny.en, base.en, small.en, medium.en. See https://github.com/openai/whisper for more details. - If show_dict is true, returns the full dict response from Whisper, including - the detected language. Otherwise returns only the transcription. + If show_dict is true, returns the full dict response from Whisper, including the detected language. Otherwise returns only the transcription. You can translate the result to english with Whisper by passing translate=True - Other values are passed directly to whisper. See https://github.com/openai/whisper/blob/main/whisper/transcribe.py - for all options + Other values are passed directly to whisper. 
See https://github.com/openai/whisper/blob/main/whisper/transcribe.py for all options """ - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA + assert isinstance(audio_data, AudioData), "Data must be audio data" import numpy as np import soundfile as sf import torch import whisper - if ( - load_options - or not hasattr(self, "whisper_model") - or self.whisper_model.get(model) is None - ): + if load_options or not hasattr(self, "whisper_model") or self.whisper_model.get(model) is None: self.whisper_model = getattr(self, "whisper_model", {}) self.whisper_model[model] = whisper.load_model(model, **load_options or {}) @@ -2293,7 +1509,7 @@ def recognize_whisper( language=language, task="translate" if translate else None, fp16=torch.cuda.is_available(), - **transcribe_options, + **transcribe_options ) if show_dict: @@ -2302,34 +1518,28 @@ def recognize_whisper( return result["text"] recognize_whisper_api = whisper.recognize_whisper_api - - def recognize_vosk(self, audio_data, language="en"): - from vosk import KaldiRecognizer, Model - - assert isinstance(audio_data, AudioData), AUDIO_DATA_MUST_BE_AUDIO_DATA - - if not hasattr(self, "vosk_model"): + + def recognize_vosk(self, audio_data, language='en'): + from vosk import Model, KaldiRecognizer + + assert isinstance(audio_data, AudioData), "Data must be audio data" + + if not hasattr(self, 'vosk_model'): if not os.path.exists("model"): - return ( - "Please download the model from " - "https://github.com/alphacep/vosk-api/blob/master/doc/models.md " - "and unpack as 'model' in the current folder." - ) + return "Please download the model from https://github.com/alphacep/vosk-api/blob/master/doc/models.md and unpack as 'model' in the current folder." + exit (1) self.vosk_model = Model("model") - rec = KaldiRecognizer(self.vosk_model, 16000) - - rec.AcceptWaveform(audio_data.get_raw_data(convert_rate=16000, convert_width=2)) + rec = KaldiRecognizer(self.vosk_model, 16000); + + rec.AcceptWaveform(audio_data.get_raw_data(convert_rate=16000, convert_width=2)); finalRecognition = rec.FinalResult() - + return finalRecognition -class PortableNamedTemporaryFile: - """Limited replacement for ``tempfile.NamedTemporaryFile``, except unlike - ``tempfile.NamedTemporaryFile``, the file can be opened again while it's currently - open, even on Windows.""" - +class PortableNamedTemporaryFile(object): + """Limited replacement for ``tempfile.NamedTemporaryFile``, except unlike ``tempfile.NamedTemporaryFile``, the file can be opened again while it's currently open, even on Windows.""" def __init__(self, mode="w+b"): self.mode = mode @@ -2363,75 +1573,24 @@ def flush(self, *args, **kwargs): WavFile = AudioFile # WavFile was renamed to AudioFile in 3.4.1 -def recognize_api( - self, - audio_data, - client_access_token, - language="en", - session_id=None, - show_all=False, -): +def recognize_api(self, audio_data, client_access_token, language="en", session_id=None, show_all=False): wav_data = audio_data.get_wav_data(convert_rate=16000, convert_width=2) url = "https://api.api.ai/v1/query" while True: boundary = uuid.uuid4().hex - if boundary.encode("utf-8") not in wav_data: - break - if session_id is None: - session_id = uuid.uuid4().hex - data = ( - b"--" - + boundary.encode("utf-8") - + b"\r\n" - + b'Content-Disposition: form-data; name="request"\r\n' - + b"Content-Type: application/json\r\n" - + b"\r\n" - + b'{"v": "20150910", "sessionId": "' - + session_id.encode("utf-8") - + b'", "lang": "' - + language.encode("utf-8") - + b'"}\r\n' - + b"--" - + 
boundary.encode("utf-8") - + b"\r\n" - + b'Content-Disposition: form-data; name="voiceData"; filename="audio.wav"\r\n' - + b"Content-Type: audio/wav\r\n" - + b"\r\n" - + wav_data - + b"\r\n" - + b"--" - + boundary.encode("utf-8") - + b"--\r\n" - ) - request = Request( - url, - data=data, - headers={ - "Authorization": f"Bearer {client_access_token}", - "Content-Length": str(len(data)), - "Expect": "100-continue", - "Content-Type": f"multipart/form-data; boundary={boundary}", - }, - ) - try: - response = urlopen(request, timeout=10) - except HTTPError as e: - raise RequestError(f"recognition request failed: {e.reason}") - except URLError as e: - raise RequestError(f"recognition connection failed: {e.reason}") + if boundary.encode("utf-8") not in wav_data: break + if session_id is None: session_id = uuid.uuid4().hex + data = b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"request\"\r\n" + b"Content-Type: application/json\r\n" + b"\r\n" + b"{\"v\": \"20150910\", \"sessionId\": \"" + session_id.encode("utf-8") + b"\", \"lang\": \"" + language.encode("utf-8") + b"\"}\r\n" + b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"voiceData\"; filename=\"audio.wav\"\r\n" + b"Content-Type: audio/wav\r\n" + b"\r\n" + wav_data + b"\r\n" + b"--" + boundary.encode("utf-8") + b"--\r\n" + request = Request(url, data=data, headers={"Authorization": "Bearer {}".format(client_access_token), "Content-Length": str(len(data)), "Expect": "100-continue", "Content-Type": "multipart/form-data; boundary={}".format(boundary)}) + try: response = urlopen(request, timeout=10) + except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason)) + except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason)) response_text = response.read().decode("utf-8") result = json.loads(response_text) - if show_all: - return result - if ( - "status" not in result - or "errorType" not in result["status"] - or result["status"]["errorType"] != "success" - ): + if show_all: return result + if "status" not in result or "errorType" not in result["status"] or result["status"]["errorType"] != "success": raise UnknownValueError() return result["result"]["resolvedQuery"] -# API.AI Speech Recognition is deprecated/not recommended as of 3.5.0, -# and currently is only optionally available for paid plans -Recognizer.recognize_api = classmethod(recognize_api) +Recognizer.recognize_api = classmethod(recognize_api) # API.AI Speech Recognition is deprecated/not recommended as of 3.5.0, and currently is only optionally available for paid plans diff --git a/custom_speech_recognition/__main__.py b/custom_speech_recognition/__main__.py index c05a1fc..68f5652 100644 --- a/custom_speech_recognition/__main__.py +++ b/custom_speech_recognition/__main__.py @@ -5,27 +5,20 @@ try: print("A moment of silence, please...") - with m as source: - r.adjust_for_ambient_noise(source) - print(f"Set minimum energy threshold to {r.energy_threshold}") + with m as source: r.adjust_for_ambient_noise(source) + print("Set minimum energy threshold to {}".format(r.energy_threshold)) while True: print("Say something!") - with m as source: - audio = r.listen(source) + with m as source: audio = r.listen(source) print("Got it! Now to recognize it...") try: # recognize speech using Google Speech Recognition value = r.recognize_google(audio) - print(f"You said {value}") + print("You said {}".format(value)) except sr.UnknownValueError: print("Oops! 
Didn't catch that") except sr.RequestError as e: - print( - ( - "Uh oh! Couldn't request results from " - "Google Speech Recognition service; {}" - ).format(e) - ) + print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e)) except KeyboardInterrupt: pass diff --git a/custom_speech_recognition/audio.py b/custom_speech_recognition/audio.py index b5d462b..0bff55e 100644 --- a/custom_speech_recognition/audio.py +++ b/custom_speech_recognition/audio.py @@ -9,24 +9,17 @@ import wave -class AudioData: +class AudioData(object): """ Creates a new ``AudioData`` instance, which represents mono audio data. - The raw audio data is specified by ``frame_data``, which is a sequence of bytes - representing audio samples. This is the frame data structure used by the PCM WAV - format. + The raw audio data is specified by ``frame_data``, which is a sequence of bytes representing audio samples. This is the frame data structure used by the PCM WAV format. - The width of each sample, in bytes, is specified by ``sample_width``. Each group of - ``sample_width`` bytes represents a single audio sample. + The width of each sample, in bytes, is specified by ``sample_width``. Each group of ``sample_width`` bytes represents a single audio sample. - The audio data is assumed to have a sample rate of ``sample_rate`` samples per - second (Hertz). + The audio data is assumed to have a sample rate of ``sample_rate`` samples per second (Hertz). - Usually, instances of this class are obtained from ``recognizer_instance.record`` - or ``recognizer_instance.listen``, or in the callback for - ``recognizer_instance.listen_in_background``, rather than instantiating them - directly. + Usually, instances of this class are obtained from ``recognizer_instance.record`` or ``recognizer_instance.listen``, or in the callback for ``recognizer_instance.listen_in_background``, rather than instantiating them directly. """ def __init__(self, frame_data, sample_rate, sample_width): @@ -40,12 +33,9 @@ def __init__(self, frame_data, sample_rate, sample_width): def get_segment(self, start_ms=None, end_ms=None): """ - Returns a new ``AudioData`` instance, trimmed to a given time interval. - In other words, an ``AudioData`` instance with the same audio data except - starting at ``start_ms`` milliseconds in and ending ``end_ms`` milliseconds in. + Returns a new ``AudioData`` instance, trimmed to a given time interval. In other words, an ``AudioData`` instance with the same audio data except starting at ``start_ms`` milliseconds in and ending ``end_ms`` milliseconds in. - If not specified, ``start_ms`` defaults to the beginning of the audio, and - ``end_ms`` defaults to the end. + If not specified, ``start_ms`` defaults to the beginning of the audio, and ``end_ms`` defaults to the end. 
""" assert ( start_ms is None or start_ms >= 0 @@ -56,11 +46,15 @@ def get_segment(self, start_ms=None, end_ms=None): if start_ms is None: start_byte = 0 else: - start_byte = int((start_ms * self.sample_rate * self.sample_width) // 1000) + start_byte = int( + (start_ms * self.sample_rate * self.sample_width) // 1000 + ) if end_ms is None: end_byte = len(self.frame_data) else: - end_byte = int((end_ms * self.sample_rate * self.sample_width) // 1000) + end_byte = int( + (end_ms * self.sample_rate * self.sample_width) // 1000 + ) return AudioData( self.frame_data[start_byte:end_byte], self.sample_rate, @@ -69,17 +63,13 @@ def get_segment(self, start_ms=None, end_ms=None): def get_raw_data(self, convert_rate=None, convert_width=None): """ - Returns a byte string representing the raw frame data for the audio represented - by the ``AudioData`` instance. + Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance. - If ``convert_rate`` is specified and the audio sample rate is not - ``convert_rate`` Hz, the resulting audio is resampled to match. + If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. - If ``convert_width`` is specified and the audio samples are not - ``convert_width`` bytes each, the resulting audio is converted to match. + If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. - Writing these bytes directly to a file results in a valid `RAW/PCM audio file - `__. + Writing these bytes directly to a file results in a valid `RAW/PCM audio file `__. """ assert ( convert_rate is None or convert_rate > 0 @@ -90,11 +80,11 @@ def get_raw_data(self, convert_rate=None, convert_width=None): raw_data = self.frame_data - # make sure unsigned 8-bit audio (which uses unsigned samples) is handled like - # higher sample width audio (which uses signed samples) + # make sure unsigned 8-bit audio (which uses unsigned samples) is handled like higher sample width audio (which uses signed samples) if self.sample_width == 1: - # subtract 128 from every sample to make them act like signed samples - raw_data = audioop.bias(raw_data, 1, -128) + raw_data = audioop.bias( + raw_data, 1, -128 + ) # subtract 128 from every sample to make them act like signed samples # resample audio at the desired rate if specified if convert_rate is not None and self.sample_rate != convert_rate: @@ -109,64 +99,62 @@ def get_raw_data(self, convert_rate=None, convert_width=None): # convert samples to desired sample width if specified if convert_width is not None and self.sample_width != convert_width: - # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866) - if convert_width == 3: - # convert audio into 32-bit first, which is always supported - raw_data = audioop.lin2lin(raw_data, self.sample_width, 4) - + if ( + convert_width == 3 + ): # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866) + raw_data = audioop.lin2lin( + raw_data, self.sample_width, 4 + ) # convert audio into 32-bit first, which is always supported try: - # test whether 24-bit audio is supported (for example, ``audioop`` - # in Python 3.3 and below don't support sample width 3, - # while Python 3.4+ does) - audioop.bias(b"", 3, 0) - - # this version of audioop doesn't support 24-bit audio (probably - # Python 3.3 or less) - except audioop.error: - # since we're in little endian, 
we discard the first byte from each - # 32-bit sample to get a 24-bit sample + audioop.bias( + b"", 3, 0 + ) # test whether 24-bit audio is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do) + except ( + audioop.error + ): # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less) raw_data = b"".join( - raw_data[i + 1 : i + 4] for i in range(0, len(raw_data), 4) - ) + raw_data[i + 1 : i + 4] + for i in range(0, len(raw_data), 4) + ) # since we're in little endian, we discard the first byte from each 32-bit sample to get a 24-bit sample else: # 24-bit audio fully supported, we don't need to shim anything raw_data = audioop.lin2lin( raw_data, self.sample_width, convert_width ) else: - raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width) + raw_data = audioop.lin2lin( + raw_data, self.sample_width, convert_width + ) - # if the output is 8-bit audio with unsigned samples, convert the samples we've - # been treating as signed to unsigned again + # if the output is 8-bit audio with unsigned samples, convert the samples we've been treating as signed to unsigned again if convert_width == 1: - # add 128 to every sample to make them act like unsigned samples again - raw_data = audioop.bias(raw_data, 1, 128) + raw_data = audioop.bias( + raw_data, 1, 128 + ) # add 128 to every sample to make them act like unsigned samples again return raw_data - def get_wav_data(self, convert_rate=None, convert_width=None, nchannels=1): + def get_wav_data(self, convert_rate=None, convert_width=None, nchannels = 1): """ - Returns a byte string representing the contents of a WAV file containing the - audio represented by the ``AudioData`` instance. + Returns a byte string representing the contents of a WAV file containing the audio represented by the ``AudioData`` instance. - If ``convert_width`` is specified and the audio samples are not - ``convert_width`` bytes each, the resulting audio is converted to match. + If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. - If ``convert_rate`` is specified and the audio sample rate is not - ``convert_rate`` Hz, the resulting audio is resampled to match. + If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. Writing these bytes directly to a file results in a valid `WAV file `__. 
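As a small illustration (editorial sketch, not part of this patch; ``audio`` is assumed to be an ``AudioData`` instance), producing 16 kHz, 16-bit mono WAV bytes and saving them to disk could look like:

```python
# Resample to 16 kHz, 16-bit samples and write a playable WAV file.
wav_bytes = audio.get_wav_data(convert_rate=16000, convert_width=2)
with open("clip.wav", "wb") as f:
    f.write(wav_bytes)
```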
""" raw_data = self.get_raw_data(convert_rate, convert_width) - sample_rate = self.sample_rate if convert_rate is None else convert_rate - sample_width = self.sample_width if convert_width is None else convert_width + sample_rate = ( + self.sample_rate if convert_rate is None else convert_rate + ) + sample_width = ( + self.sample_width if convert_width is None else convert_width + ) # generate the WAV file contents with io.BytesIO() as wav_file: wav_writer = wave.open(wav_file, "wb") - - # note that we can't use context manager, - # since that was only added in Python 3.4 - try: + try: # note that we can't use context manager, since that was only added in Python 3.4 wav_writer.setframerate(sample_rate) wav_writer.setsampwidth(sample_width) wav_writer.setnchannels(nchannels) @@ -178,30 +166,28 @@ def get_wav_data(self, convert_rate=None, convert_width=None, nchannels=1): def get_aiff_data(self, convert_rate=None, convert_width=None): """ - Returns a byte string representing the contents of an AIFF-C file containing - the audio represented by the ``AudioData`` instance. + Returns a byte string representing the contents of an AIFF-C file containing the audio represented by the ``AudioData`` instance. - If ``convert_width`` is specified and the audio samples are not - ``convert_width`` bytes each, the resulting audio is converted to match. + If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. - If ``convert_rate`` is specified and the audio sample rate is not - ``convert_rate`` Hz, the resulting audio is resampled to match. + If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. Writing these bytes directly to a file results in a valid `AIFF-C file `__. 
""" raw_data = self.get_raw_data(convert_rate, convert_width) - sample_rate = self.sample_rate if convert_rate is None else convert_rate - sample_width = self.sample_width if convert_width is None else convert_width - - # the AIFF format is big-endian, so we need to convert the little-endian - # raw data to big-endian + sample_rate = ( + self.sample_rate if convert_rate is None else convert_rate + ) + sample_width = ( + self.sample_width if convert_width is None else convert_width + ) - # ``audioop.byteswap`` was only added in Python 3.4 - if hasattr(audioop, "byteswap"): + # the AIFF format is big-endian, so we need to convert the little-endian raw data to big-endian + if hasattr( + audioop, "byteswap" + ): # ``audioop.byteswap`` was only added in Python 3.4 raw_data = audioop.byteswap(raw_data, sample_width) - else: - # manually reverse the bytes of each sample, - # which is slower but works well enough as a fallback + else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback raw_data = raw_data[sample_width - 1 :: -1] + b"".join( raw_data[i + sample_width : i : -1] for i in range(sample_width - 1, len(raw_data), sample_width) @@ -210,9 +196,7 @@ def get_aiff_data(self, convert_rate=None, convert_width=None): # generate the AIFF-C file contents with io.BytesIO() as aiff_file: aiff_writer = aifc.open(aiff_file, "wb") - # note that we can't use context manager, - # since that was only added in Python 3.4 - try: + try: # note that we can't use context manager, since that was only added in Python 3.4 aiff_writer.setframerate(sample_rate) aiff_writer.setsampwidth(sample_width) aiff_writer.setnchannels(1) @@ -224,18 +208,13 @@ def get_aiff_data(self, convert_rate=None, convert_width=None): def get_flac_data(self, convert_rate=None, convert_width=None): """ - Returns a byte string representing the contents of a FLAC file containing the - audio represented by the ``AudioData`` instance. + Returns a byte string representing the contents of a FLAC file containing the audio represented by the ``AudioData`` instance. - Note that 32-bit FLAC is not supported. If the audio data is 32-bit and - ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit - FLAC. + Note that 32-bit FLAC is not supported. If the audio data is 32-bit and ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit FLAC. - If ``convert_rate`` is specified and the audio sample rate is not - ``convert_rate`` Hz, the resulting audio is resampled to match. + If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match. - If ``convert_width`` is specified and the audio samples are not - ``convert_width`` bytes each, the resulting audio is converted to match. + If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match. Writing these bytes directly to a file results in a valid `FLAC file `__. 
""" @@ -243,34 +222,31 @@ def get_flac_data(self, convert_rate=None, convert_width=None): convert_width % 1 == 0 and 1 <= convert_width <= 3 ), "Sample width to convert to must be between 1 and 3 inclusive" - # resulting WAV data would be 32-bit, - # which is not convertable to FLAC using our encoder - if self.sample_width > 3 and convert_width is None: - # the largest supported sample width is 24-bit, - # so we'll limit the sample width to that - convert_width = 3 + if ( + self.sample_width > 3 and convert_width is None + ): # resulting WAV data would be 32-bit, which is not convertable to FLAC using our encoder + convert_width = 3 # the largest supported sample width is 24-bit, so we'll limit the sample width to that # run the FLAC converter with the WAV data to get the FLAC data wav_data = self.get_wav_data(convert_rate, convert_width) flac_converter = get_flac_converter() - - # on Windows, specify that the process is - # to be started without showing a console window - if os.name == "nt": + if ( + os.name == "nt" + ): # on Windows, specify that the process is to be started without showing a console window startup_info = subprocess.STARTUPINFO() - # specify that the wShowWindow field of `startup_info` contains a value - startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW - # specify that the console window should be hidden - startup_info.wShowWindow = subprocess.SW_HIDE + startup_info.dwFlags |= ( + subprocess.STARTF_USESHOWWINDOW + ) # specify that the wShowWindow field of `startup_info` contains a value + startup_info.wShowWindow = ( + subprocess.SW_HIDE + ) # specify that the console window should be hidden else: startup_info = None # default startupinfo process = subprocess.Popen( [ flac_converter, "--stdout", - # put the resulting FLAC file in stdout, - # and make sure it's not mixed with any program output - "--totally-silent", + "--totally-silent", # put the resulting FLAC file in stdout, and make sure it's not mixed with any program output "--best", # highest level of compression available "-", # the input FLAC file contents will be given in stdin ], @@ -283,13 +259,12 @@ def get_flac_data(self, convert_rate=None, convert_width=None): def get_flac_converter(): - """Returns the absolute path of a FLAC converter executable, or raises an OSError - if none can be found.""" + """Returns the absolute path of a FLAC converter executable, or raises an OSError if none can be found.""" flac_converter = shutil_which("flac") # check for installed version first if flac_converter is None: # flac utility is not installed - # directory of the current module file, - # where all the FLAC bundled binaries are stored - base_path = os.path.dirname(os.path.abspath(__file__)) + base_path = os.path.dirname( + os.path.abspath(__file__) + ) # directory of the current module file, where all the FLAC bundled binaries are stored system, machine = platform.system(), platform.machine() if system == "Windows" and machine in { "i686", @@ -313,9 +288,7 @@ def get_flac_converter(): flac_converter = os.path.join(base_path, "flac-linux-x86_64") else: # no FLAC converter available raise OSError( - "FLAC conversion utility not available - consider installing the " - "FLAC command line application by running `apt-get install flac` " - "or your operating system's equivalent" + "FLAC conversion utility not available - consider installing the FLAC command line application by running `apt-get install flac` or your operating system's equivalent" ) # mark FLAC converter as executable if possible diff --git 
a/custom_speech_recognition/recognizers/whisper.py b/custom_speech_recognition/recognizers/whisper.py index 0ea5395..98f76ef 100644 --- a/custom_speech_recognition/recognizers/whisper.py +++ b/custom_speech_recognition/recognizers/whisper.py @@ -9,22 +9,19 @@ def recognize_whisper_api( recognizer, - audio_data: AudioData, + audio_data: "AudioData", *, model: str = "whisper-1", api_key: str | None = None, ): """ - Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), - using the OpenAI Whisper API. + Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the OpenAI Whisper API. - This function requires an OpenAI account; visit https://platform.openai.com/signup, - then generate API Key in `User settings `__. + This function requires an OpenAI account; visit https://platform.openai.com/signup, then generate API Key in `User settings `__. Detail: https://platform.openai.com/docs/guides/speech-to-text - Raises a ``speech_recognition.exceptions.SetupError`` exception if there are any - issues with the openai installation, or the environment variable is missing. + Raises a ``speech_recognition.exceptions.SetupError`` exception if there are any issues with the openai installation, or the environment variable is missing. """ if not isinstance(audio_data, AudioData): raise ValueError("``audio_data`` must be an ``AudioData`` instance") From eb9d6ace49d55780a55e1f4a20a961c65a066839 Mon Sep 17 00:00:00 2001 From: Daniel Zarifpour Date: Thu, 1 Jun 2023 17:18:08 -0400 Subject: [PATCH 3/5] fix(lint): ignore custom_speech_recognition --- pyproject.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index c351d70..3e0bd84 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,6 @@ +[tool.black] +exclude = ["custom_speech_recognition"] + [tool.ruff] select = [ # "E", # pycodestyle @@ -6,3 +9,4 @@ select = [ "UP", # pyupgrade ] src = ["."] +exclude = ["custom_speech_recognition"] From 36162bdbd4e3ea5bfd636ce0fd159162423c2d2d Mon Sep 17 00:00:00 2001 From: Daniel Zarifpour Date: Fri, 2 Jun 2023 00:35:01 -0400 Subject: [PATCH 4/5] fix(OS compatibility): makefile & README --- Makefile | 2 +- README.md | 50 +++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 48 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 4122c0b..091a8e5 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ precommit: make format venv: - python3 -m venv ecout_env + python -m venv ecout_env install: pip install -r requirements.txt \ No newline at end of file diff --git a/README.md b/README.md index a502ba1..f9a5826 100644 --- a/README.md +++ b/README.md @@ -110,9 +110,53 @@ Contributions are welcome! Feel free to open issues or submit pull requests to i ### Installation -1. `make venv` -2. Activate the venv: `ecout_venv` -3. `make install` +To set up the environment and install the necessary dependencies, follow these steps based on your operating system. + +#### Windows + +1. Install make on your Windows machine. + + ```shell + choco install make + ``` + +2. Create a virtual environment: + + ```shell + make venv + ``` + +3. Activate the virtual environment: + + ```shell + .\ecout_venv\Scripts\activate + ``` + +4. Install the required packages: + + ```shell + make install + ``` + +#### Linux & MacOS + +1. Create a virtual environment: + + ```shell + make venv + ``` + +2. Activate the virtual environment by running the command: + + ```shell + source ecout_venv/bin/activate + ``` + +3. 
Install the required python packages: + + ```shell + make install + ``` ### Code quality From 4a27ea1d753b3108242351a06912f24f5c14a3cd Mon Sep 17 00:00:00 2001 From: Daniel Zarifpour Date: Fri, 2 Jun 2023 00:40:25 -0400 Subject: [PATCH 5/5] docs: typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f9a5826..095c6e7 100644 --- a/README.md +++ b/README.md @@ -158,7 +158,7 @@ To set up the environment and install the necessary dependencies, follow these s make install ``` -### Code quality +### Code Quality Before submitting a pull request run `make precommit` and resolve any issues. Additionally, here are some useful commands: