diff --git a/vocode/streaming/transcriber/base_transcriber.py b/vocode/streaming/transcriber/base_transcriber.py
index 8d1ea1981..0167ee90a 100644
--- a/vocode/streaming/transcriber/base_transcriber.py
+++ b/vocode/streaming/transcriber/base_transcriber.py
@@ -10,7 +10,7 @@
 from vocode.streaming.models.model import BaseModel
 from vocode.streaming.models.transcriber import TranscriberConfig
-from vocode.streaming.utils.FillerModel import FillerModel
+from vocode.streaming.utils.back_tracking_model import BackTrackingModel
 from vocode.streaming.utils.interrupt_model import InterruptModel
 from vocode.streaming.utils.worker import AsyncWorker, ThreadAsyncWorker

 from vocode.utils.context_tracker.factory import ContextTrackerFactory
@@ -68,7 +68,7 @@ def __init__(
         self.logger = logger or logging.getLogger(__name__)

         if self.transcriber_config.skip_on_filler_audio:
-            self.skip_model: FillerModel = FillerModel(logger=self.logger)
+            self.skip_model: BackTrackingModel = BackTrackingModel(logger=self.logger)
             self.interrupt_model_initialize_task = asyncio.create_task(
                 self.skip_model.initialize_embeddings()
             )
diff --git a/vocode/streaming/utils/FillerModel.py b/vocode/streaming/utils/back_tracking_model.py
similarity index 75%
rename from vocode/streaming/utils/FillerModel.py
rename to vocode/streaming/utils/back_tracking_model.py
index 9f95678a5..3d3ee3961 100644
--- a/vocode/streaming/utils/FillerModel.py
+++ b/vocode/streaming/utils/back_tracking_model.py
@@ -1,4 +1,5 @@
 import logging
+import os
 from typing import Optional

 from vocode.streaming.utils.embedding_model import EmbeddingModel
@@ -23,11 +24,11 @@
 ]


-class FillerModel(EmbeddingModel):
+class BackTrackingModel(EmbeddingModel):
     def __init__(self, embeddings_cache_path: str = os.path.join(
-        os.path.dirname(__file__), "filler_embeddings"),
-        embeddings_file: str = 'filler_embeddings',
-        openai_api_key: Optional[str] = None, logger: Optional[logging.Logger] = None):
+            os.path.dirname(__file__), "filler_embeddings"),
+            embeddings_file: str = 'filler_embeddings',
+            openai_api_key: Optional[str] = None, logger: Optional[logging.Logger] = None):
         self.phrases = FillerPhrases
         self.strict_phrases = ["hmm", 'go on', "tell me more", "please continue"]
         super().__init__(embeddings_cache_path, embeddings_file, openai_api_key, logger)
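
For context, a minimal usage sketch of the renamed class, based only on what this diff shows: `BackTrackingModel(logger=...)` is constructed with its default `filler_embeddings` cache, and `initialize_embeddings()` is an awaitable that is scheduled before the model is used as the transcriber's `skip_model`. The standalone `main()` wrapper below is hypothetical and merely stands in for the wiring in `BaseTranscriber.__init__`.

```python
import asyncio
import logging

from vocode.streaming.utils.back_tracking_model import BackTrackingModel


async def main() -> None:
    # Mirrors the wiring in BaseTranscriber.__init__ above: construct the model
    # with a logger, then load/compute its phrase embeddings asynchronously.
    logger = logging.getLogger(__name__)
    back_tracking_model = BackTrackingModel(logger=logger)
    await back_tracking_model.initialize_embeddings()
    # At this point the model is ready to serve as skip_model, i.e. to match
    # incoming transcripts against back-tracking phrases ("hmm", "go on", ...).


if __name__ == "__main__":
    asyncio.run(main())
```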