-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy path: aispeech.py
74 lines (55 loc) · 1.87 KB
/
aispeech.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import time
from pathlib import Path
import torch
import pyaudio
import wave
import sys
from array import array
from struct import pack
# Disable TorchScript profiling mode — avoids JIT recompilation overhead for
# the repeated small forward passes the TTS model makes.
torch._C._jit_set_profiling_mode(False)

# The synthesized speech is always written to (and played back from) this file.
VOICE_OUTPUT_FILENAME = "audioResponse.wav"

# Silero TTS configuration.
params = {
    'activate': True,
    'speaker': 'en_21',      # voice id within the model
    'language': 'en',
    'model_id': 'v3_en',     # Silero torch.hub model variant
    'sample_rate': 48000,    # Hz
    'device': 'cpu',
    'show_text': True,
    'autoplay': True,
    'voice_pitch': 'medium',
    'voice_speed': 'medium',
}

# Derive the standalone settings from `params` so the two can never drift
# apart (previously the same literals were duplicated in both places).
device = torch.device(params['device'])
sample_rate = params['sample_rate']
speaker = params['speaker']
def initialize(text):
    """Synthesize `text` to a WAV file with Silero TTS and play it aloud.

    Loads (or reuses the torch.hub cache of) the Silero TTS model, writes the
    synthesized speech to VOICE_OUTPUT_FILENAME, then streams that WAV file to
    the default audio output device via PyAudio.

    Args:
        text: The string to speak.
    """
    content = text
    # torch.hub downloads the model on the first call, then serves it from
    # the local cache.
    model, example_text = torch.hub.load(
        repo_or_dir='snakers4/silero-models',
        model='silero_tts',
        language=params['language'],
        speaker=params['model_id'],
    )
    model.to(params['device'])
    # Use the module-level constant so synthesis and playback always agree
    # on the file name (it was previously hard-coded in two places).
    output_file = Path(VOICE_OUTPUT_FILENAME)
    model.save_wav(text=content,
                   speaker=speaker,
                   sample_rate=sample_rate,
                   audio_path=str(output_file))
    print("******************Ai SPEEKING***************************")
    print("ai speaking:", content)
    CHUNK = 1024  # frames per playback write
    p = pyaudio.PyAudio()
    try:
        # `with` guarantees the WAV file handle is closed (it previously
        # leaked), and `finally` guarantees the stream/PyAudio are released
        # even if playback raises.
        with wave.open(str(output_file), 'rb') as wf:
            stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                            channels=wf.getnchannels(),
                            rate=wf.getframerate(),
                            output=True)
            try:
                data = wf.readframes(CHUNK)
                while len(data) > 0:
                    stream.write(data)
                    data = wf.readframes(CHUNK)
            finally:
                stream.stop_stream()
                stream.close()
    finally:
        p.terminate()
    # Brief pause to let the audio device drain before returning.
    time.sleep(0.1)
if __name__ == '__main__':
    # Bug fix: the previous guard called oobaapi() and PlayAudio(), neither of
    # which is defined anywhere in this file, so running the script always
    # raised NameError. Call the actual entry point instead. Speaks the
    # command-line arguments, or a short demo line when none are given.
    initialize(" ".join(sys.argv[1:]) or "Hello, I am ready.")