Skip to content

Commit 47af12e

Browse files
committed
Replace Weaver's custom credential management with a lightweight abstraction layer that leverages litellm's native credential resolution
1 parent f7d9ddd commit 47af12e

File tree

6 files changed

+264
-82
lines changed

6 files changed

+264
-82
lines changed

weaver.toml

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
# Example weaver.toml configuration file
2+
# Place this in your project root or home directory
3+
4+
# litellm-specific configuration
5+
[litellm]
6+
# Enable verbose logging for debugging
7+
set_verbose = true
8+
9+
# Drop parameters that aren't supported by certain providers
10+
drop_params = true
11+
12+
# Custom API bases for different providers (optional)
13+
[api_bases]
14+
# Use a custom OpenAI-compatible endpoint
15+
# openai = "https://api.custom-openai.com/v1"
16+
17+
# Use Azure OpenAI
18+
# azure = "https://your-resource.openai.azure.com"
19+
20+
# Example credentials (these would typically be set as environment variables instead)
21+
# [credentials]
22+
# openai_api_key = "sk-..."
23+
# anthropic_api_key = "sk-ant-..."
24+
# google_api_key = "AI..."
25+
26+
# Note: It's recommended to use environment variables for credentials:
27+
# export OPENAI_API_KEY="sk-..."
28+
# export ANTHROPIC_API_KEY="sk-ant-..."
29+
# export GOOGLE_API_KEY="AI..."
30+
# export AZURE_API_KEY="..."
31+
# export COHERE_API_KEY="..."

weaver/agent.py

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,6 @@
11
"""
22
Agent: Executes individual tasks via an LLM, handling retries and updating the Blueprint.
33
"""
4-
# weaver/agent.py
5-
64
import time
75
from datetime import datetime
86
import litellm
@@ -62,17 +60,23 @@ def execute_task(self, task_id: int):
6260

6361
# 2) Attempt LLM call with retries
6462
last_error = None
63+
response = None
64+
6565
for attempt in range(1, 4):
6666
try:
67+
# Use litellm.completion with the model name directly
68+
# litellm handles provider routing and authentication
6769
response = litellm.completion(
6870
model=model_cfg["model"],
69-
prompt=prompt,
71+
messages=[{"role": "user", "content": prompt}],
7072
max_tokens=model_cfg.get("max_tokens")
7173
)
7274
break
7375
except Exception as e:
7476
last_error = e
75-
time.sleep(2 ** attempt) # exponential backoff
77+
print(f"[weaver] Task {task_id} attempt {attempt} failed: {e}")
78+
if attempt < 3:
79+
time.sleep(2 ** attempt) # exponential backoff
7680
else:
7781
# 3a) All retries failed: record failure
7882
err_msg = str(last_error)
@@ -93,6 +97,8 @@ def execute_task(self, task_id: int):
9397
usage = response.get("usage", {})
9498
prompt_tokens = usage.get("prompt_tokens", 0)
9599
completion_tokens = usage.get("completion_tokens", 0)
100+
101+
# Calculate costs using the model's rate information
96102
rates = model_cfg.get("cost_per_1k_tokens", {})
97103
cost = (
98104
prompt_tokens / 1_000 * rates.get("prompt", 0) +
@@ -114,4 +120,4 @@ def execute_task(self, task_id: int):
114120
cost=cost,
115121
execution_end_timestamp=end_ts
116122
)
117-
self.blueprint.update_task_status(task_id, "awaiting_human_approval")
123+
self.blueprint.update_task_status(task_id, "awaiting_human_approval")

weaver/blueprint.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,8 @@ def __init__(self, db_path: str):
4444
except Exception as e:
4545
raise DatabaseError(f"Failed to connect to SQLite DB at '{db_path}': {e}")
4646

47+
48+
4749
def _initialize_table(self) -> None:
4850
"""
4951
Create tasks table if not exists, according to the defined schema.
@@ -105,8 +107,6 @@ def get_task(self, task_id: int) -> dict:
105107
row = self._execute_query(sql, (task_id,), fetch='one')
106108
if not row:
107109
return None
108-
cols = [col[0] for col in self.conn.cursor().description] if False else []
109-
# Actually get column names via PRAGMA
110110
cols = [info[1] for info in self._execute_query("PRAGMA table_info(tasks)", fetch='all')]
111111
return dict(zip(cols, row))
112112

weaver/cli.py

Lines changed: 39 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1,35 +1,23 @@
1-
# weaver/cli.py
2-
31
import sys
42
import click
53
from weaver.project import Project
64
from weaver.exceptions import WeaverError
7-
from weaver.config import get_openai_api_key
5+
from weaver.config import check_environment, get_missing_credentials
86

97
@click.group()
10-
@click.option(
11-
"--api-key", "-k",
12-
help="Your OpenAI API key (overrides env or config file).",
13-
)
148
@click.pass_context
15-
def cli(ctx, api_key):
9+
def cli(ctx):
1610
"""
17-
python-weaver CLI.
11+
python-weaver CLI: orchestrate long-duration LLM workflows.
1812
"""
19-
# Resolve and stash the key in context
2013
ctx.ensure_object(dict)
21-
ctx.obj["OPENAI_API_KEY"] = get_openai_api_key(api_key)
22-
23-
# Then in commands where you call litellm or any SDK, inject ctx.obj["OPENAI_API_KEY"]
24-
# For example:
25-
@cli.command()
26-
@click.argument("project_name")
27-
@click.argument("project_goal")
28-
@click.pass_context
29-
30-
def cli():
31-
"""python-weaver CLI: orchestrate long-duration LLM workflows."""
32-
pass
14+
15+
# Check environment on startup
16+
env_error = check_environment()
17+
if env_error:
18+
click.echo(f"[weaver][error] {env_error}", err=True)
19+
click.echo("\nFor setup instructions, see: https://docs.litellm.ai/docs/providers", err=True)
20+
sys.exit(1)
3321

3422
@cli.command()
3523
@click.argument("project_name")
@@ -111,8 +99,36 @@ def run(project_name: str, no_human_feedback: bool, steps: int):
11199
click.echo(f"[weaver][error] {e}", err=True)
112100
sys.exit(1)
113101

102+
@cli.command()
103+
def check():
104+
"""
105+
Check credential configuration for all providers.
106+
"""
107+
missing = get_missing_credentials()
108+
109+
if not missing:
110+
click.echo("[weaver] ✓ All configured models have valid credentials.")
111+
return
112+
113+
click.echo("[weaver] Credential status:")
114+
115+
# Show missing credentials
116+
for provider, creds in missing.items():
117+
cred_list = " or ".join(creds)
118+
click.echo(f" ❌ {provider}: Missing {cred_list}")
119+
120+
# Show available models
121+
from weaver.config import LLM_CONFIG, validate_model_credentials
122+
click.echo("\n[weaver] Model availability:")
123+
124+
for model_key in LLM_CONFIG["available_llms"]:
125+
status = "✓" if validate_model_credentials(model_key) else "❌"
126+
click.echo(f" {status} {model_key}")
127+
128+
click.echo("\nFor setup instructions, see: https://docs.litellm.ai/docs/providers")
129+
114130
def main():
115131
cli()
116132

117133
if __name__ == "__main__":
118-
main()
134+
main()

0 commit comments

Comments
 (0)