diff --git a/.env.sample b/.env.sample index 51bbc47..8eb47e3 100644 --- a/.env.sample +++ b/.env.sample @@ -1,5 +1,9 @@ GEMINI_PROJECT_ID= +# NOTE: Authentication is mutually exclusive. Please provide only one method at a time. +# [REQUIRED] By default, please set GEMINI_API_KEY for API key authentication. +# If you want to use Vertex AI authentication instead, unset GEMINI_API_KEY and set GOOGLE_APPLICATION_CREDENTIALS (GEMINI_PROJECT_ID and GEMINI_LOCATION are also read for Vertex AI). GEMINI_API_KEY= +# GOOGLE_APPLICATION_CREDENTIALS= GITHUB_TOKEN= OPENROUTER_API_KEY = OPENROUTER_MODEL = \ No newline at end of file diff --git a/utils/call_llm.py b/utils/call_llm.py index 714d4dd..5f3b256 100644 --- a/utils/call_llm.py +++ b/utils/call_llm.py @@ -48,17 +48,19 @@ def call_llm(prompt: str, use_cache: bool = True) -> str: return cache[prompt] # Call the LLM if not in cache or cache disabled - # client = genai.Client( - # vertexai=True, - # # TODO: change to your own project id and location - # project=os.getenv("GEMINI_PROJECT_ID", "your-project-id"), - # location=os.getenv("GEMINI_LOCATION", "us-central1") - # ) - - # You can comment the previous line and use the AI Studio key instead: - client = genai.Client( - api_key=os.getenv("GEMINI_API_KEY", ""), - ) + # You can override authentication by setting GEMINI_API_KEY (for API key) or GEMINI_PROJECT_ID/GEMINI_LOCATION (for Vertex AI) via a .env file or OS environment variables. + # Authentication: Use API key if GEMINI_API_KEY is set, otherwise use Vertex AI + if os.getenv("GEMINI_API_KEY"): + client = genai.Client( + api_key=os.getenv("GEMINI_API_KEY", ""), + ) + else: + client = genai.Client( + vertexai=True, + project=os.getenv("GEMINI_PROJECT_ID", "your-project-id"), + location=os.getenv("GEMINI_LOCATION", "us-central1") + ) + # model = os.getenv("GEMINI_MODEL", "gemini-2.5-pro-exp-03-25") model = os.getenv("GEMINI_MODEL", "gemini-2.0-flash-exp")