diff --git a/src/agent.py b/src/agent.py
index b846dd7..d85c794 100644
--- a/src/agent.py
+++ b/src/agent.py
@@ -10,6 +10,7 @@
     RoomInputOptions,
     WorkerOptions,
     cli,
+    inference,
     metrics,
 )
 from livekit.plugins import noise_cancellation, silero
@@ -62,13 +63,15 @@ async def entrypoint(ctx: JobContext):
     session = AgentSession(
         # Speech-to-text (STT) is your agent's ears, turning the user's speech into text that the LLM can understand
         # See all available models at https://docs.livekit.io/agents/models/stt/
-        stt="assemblyai/universal-streaming:en",
+        stt=inference.STT(model="assemblyai/universal-streaming", language="en"),
         # A Large Language Model (LLM) is your agent's brain, processing user input and generating a response
         # See all available models at https://docs.livekit.io/agents/models/llm/
-        llm="openai/gpt-4.1-mini",
+        llm=inference.LLM(model="openai/gpt-4.1-mini"),
         # Text-to-speech (TTS) is your agent's voice, turning the LLM's text into speech that the user can hear
         # See all available models as well as voice selections at https://docs.livekit.io/agents/models/tts/
-        tts="cartesia/sonic-2:9626c31c-bec5-4cca-baa8-f8ba9e84c8bc",
+        tts=inference.TTS(
+            model="cartesia/sonic-3", voice="9626c31c-bec5-4cca-baa8-f8ba9e84c8bc"
+        ),
         # VAD and turn detection are used to determine when the user is speaking and when the agent should respond
         # See more at https://docs.livekit.io/agents/build/turns
         turn_detection=MultilingualModel(),
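
Note: the snippet below is a minimal, self-contained sketch of the session setup after this change, not part of the diff itself. It uses only identifiers visible in the diff (inference from livekit.agents, MultilingualModel from the turn-detector plugin); the vad=silero.VAD.load() line and the cli.run_app boilerplate are assumed from the surrounding starter code.

from livekit.agents import AgentSession, JobContext, WorkerOptions, cli, inference
from livekit.plugins import silero
from livekit.plugins.turn_detector.multilingual import MultilingualModel


async def entrypoint(ctx: JobContext):
    await ctx.connect()

    # inference.* objects replace the old "provider/model[:variant]" strings,
    # so language and voice become explicit keyword arguments.
    session = AgentSession(
        stt=inference.STT(model="assemblyai/universal-streaming", language="en"),
        llm=inference.LLM(model="openai/gpt-4.1-mini"),
        tts=inference.TTS(
            model="cartesia/sonic-3", voice="9626c31c-bec5-4cca-baa8-f8ba9e84c8bc"
        ),
        turn_detection=MultilingualModel(),
        vad=silero.VAD.load(),
    )
    # ... start the session with your Agent and room options as in the full starter file


if __name__ == "__main__":
    cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint))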