Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update README for session argument #1652

Draft
wants to merge 2 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/python.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ on:
- 'platform/**'

env:
PYTHON_VERSION: "3.11"
PYTHON_VERSION: "3.10"

jobs:
black:
Expand Down
13 changes: 13 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -125,3 +125,16 @@ Our contributors have made this project possible. Thank you! 🙏
<div align="center">
<sub>Made with <a href="https://contrib.rocks">contrib.rocks</a>.</sub>
</div>

## Arguments for OpenAIAgentService

The `OpenAIAgentService` class is defined in `platform/reworkd_platform/web/api/agent/agent_service/open_ai_agent_service.py`.

The constructor of `OpenAIAgentService` takes the following arguments:

- `model`: The model to be used.
- `settings`: The settings for the model.
- `token_service`: The token service for managing tokens.
- `callbacks`: Optional list of callback handlers.
- `user`: The user information.
- `oauth_crud`: The OAuth CRUD operations.
2 changes: 1 addition & 1 deletion next/src/server/api/routers/agentRouter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ async function generateAgentName(goal: string) {
`,
},
],
model: "gpt-3.5-turbo",
model: "llama3.2",
});

// @ts-ignore
Expand Down
2 changes: 1 addition & 1 deletion next/src/stores/modelSettingsStore.ts
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ export const useModelSettingsStore = createSelectors(
partialize: (state) => ({
modelSettings: {
...state.modelSettings,
customModelName: "gpt-3.5-turbo",
customModelName: "llama3.2",
maxTokens: Math.min(state.modelSettings.maxTokens, 4000),
},
}),
Expand Down
8 changes: 5 additions & 3 deletions next/src/types/modelSettings.ts
Original file line number Diff line number Diff line change
@@ -1,17 +1,19 @@
import { type Language } from "../utils/languages";

export const [GPT_35_TURBO, GPT_35_TURBO_16K, GPT_4] = [
export const [GPT_35_TURBO, GPT_35_TURBO_16K, GPT_4, LLAMA_3_2] = [
"gpt-3.5-turbo" as const,
"gpt-3.5-turbo-16k" as const,
"gpt-4" as const,
"llama3.2" as const,
];
export const GPT_MODEL_NAMES = [GPT_35_TURBO, GPT_35_TURBO_16K, GPT_4];
export type GPTModelNames = "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-4";
export const GPT_MODEL_NAMES = [GPT_35_TURBO, GPT_35_TURBO_16K, GPT_4, LLAMA_3_2];
export type GPTModelNames = "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-4" | "llama3.2";

export const MAX_TOKENS: Record<GPTModelNames, number> = {
"gpt-3.5-turbo": 4000,
"gpt-3.5-turbo-16k": 16000,
"gpt-4": 4000,
"llama3.2": 4000,
};

export interface ModelSettings {
Expand Down
5 changes: 4 additions & 1 deletion platform/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM python:3.11-slim-buster as prod
FROM python:3.10-slim-buster as prod

RUN apt-get update && apt-get install -y \
default-libmysqlclient-dev \
Expand Down Expand Up @@ -30,6 +30,9 @@ RUN apt-get purge -y \
COPY . /app/src/
RUN poetry install --only main

# Install ollama
RUN pip install ollama

CMD ["/usr/local/bin/python", "-m", "reworkd_platform"]

FROM prod as dev
Expand Down
245 changes: 245 additions & 0 deletions platform/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -149,3 +149,248 @@ poetry run pytest -vv --cov="reworkd_platform" .
poetry self add poetry-plugin-up
poetry up --latest
```

## Installing the package using pip

To install the `reworkd_platform` package using pip, run the following command:

```bash
pip install reworkd_platform
```

## Using the package in any code

To use the `reworkd_platform` package in your code, you can import it as follows:

```python
import reworkd_platform

# Example usage — `some_function` is a placeholder; replace it with an
# actual function exported by the package.
reworkd_platform.some_function()
```

## Using pip functions

The `reworkd_platform` package provides several functions for interacting with agents. Here are some examples:

### Starting a goal agent

```python
from reworkd_platform.web.api.agent.agent_service.open_ai_agent_service import OpenAIAgentService
from reworkd_platform.web.api.agent.model_factory import WrappedChatOpenAI
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.services.tokenizer.token_service import TokenService
from reworkd_platform.db.crud.oauth import OAuthCrud
from reworkd_platform.schemas.user import UserBase

# Initialize the OpenAIAgentService
model = WrappedChatOpenAI(model_name="gpt-3.5-turbo")
settings = ModelSettings(language="en")
token_service = TokenService.create()
callbacks = None
user = UserBase(id=1, name="John Doe")
oauth_crud = OAuthCrud()

agent_service = OpenAIAgentService(model, settings, token_service, callbacks, user, oauth_crud)

# Start a goal agent
tasks = agent_service.pip_start_goal_agent(goal="Your goal here")
print(tasks)
```

### Analyzing a task agent

```python
from reworkd_platform.web.api.agent.agent_service.open_ai_agent_service import OpenAIAgentService
from reworkd_platform.web.api.agent.model_factory import WrappedChatOpenAI
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.services.tokenizer.token_service import TokenService
from reworkd_platform.db.crud.oauth import OAuthCrud
from reworkd_platform.schemas.user import UserBase

# Initialize the OpenAIAgentService
model = WrappedChatOpenAI(model_name="gpt-3.5-turbo")
settings = ModelSettings(language="en")
token_service = TokenService.create()
callbacks = None
user = UserBase(id=1, name="John Doe")
oauth_crud = OAuthCrud()

agent_service = OpenAIAgentService(model, settings, token_service, callbacks, user, oauth_crud)

# Analyze a task agent
analysis = agent_service.pip_analyze_task_agent(goal="Your goal here", task="Your task here", tool_names=["tool1", "tool2"])
print(analysis)
```

### Executing a task agent

```python
from reworkd_platform.web.api.agent.agent_service.open_ai_agent_service import OpenAIAgentService
from reworkd_platform.web.api.agent.model_factory import WrappedChatOpenAI
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.services.tokenizer.token_service import TokenService
from reworkd_platform.db.crud.oauth import OAuthCrud
from reworkd_platform.schemas.user import UserBase

# Initialize the OpenAIAgentService
model = WrappedChatOpenAI(model_name="gpt-3.5-turbo")
settings = ModelSettings(language="en")
token_service = TokenService.create()
callbacks = None
user = UserBase(id=1, name="John Doe")
oauth_crud = OAuthCrud()

agent_service = OpenAIAgentService(model, settings, token_service, callbacks, user, oauth_crud)

# First obtain an analysis for the task, then execute it
analysis = agent_service.pip_analyze_task_agent(goal="Your goal here", task="Your task here", tool_names=["tool1", "tool2"])
response = agent_service.pip_execute_task_agent(goal="Your goal here", task="Your task here", analysis=analysis)
print(response)
```

### Creating tasks agent

```python
from reworkd_platform.web.api.agent.agent_service.open_ai_agent_service import OpenAIAgentService
from reworkd_platform.web.api.agent.model_factory import WrappedChatOpenAI
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.services.tokenizer.token_service import TokenService
from reworkd_platform.db.crud.oauth import OAuthCrud
from reworkd_platform.schemas.user import UserBase

# Initialize the OpenAIAgentService
model = WrappedChatOpenAI(model_name="gpt-3.5-turbo")
settings = ModelSettings(language="en")
token_service = TokenService.create()
callbacks = None
user = UserBase(id=1, name="John Doe")
oauth_crud = OAuthCrud()

agent_service = OpenAIAgentService(model, settings, token_service, callbacks, user, oauth_crud)

# Create tasks agent
tasks = agent_service.pip_create_tasks_agent(goal="Your goal here", tasks=["task1", "task2"], last_task="Your last task here", result="Your result here")
print(tasks)
```

### Summarizing task agent

```python
from reworkd_platform.web.api.agent.agent_service.open_ai_agent_service import OpenAIAgentService
from reworkd_platform.web.api.agent.model_factory import WrappedChatOpenAI
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.services.tokenizer.token_service import TokenService
from reworkd_platform.db.crud.oauth import OAuthCrud
from reworkd_platform.schemas.user import UserBase

# Initialize the OpenAIAgentService
model = WrappedChatOpenAI(model_name="gpt-3.5-turbo")
settings = ModelSettings(language="en")
token_service = TokenService.create()
callbacks = None
user = UserBase(id=1, name="John Doe")
oauth_crud = OAuthCrud()

agent_service = OpenAIAgentService(model, settings, token_service, callbacks, user, oauth_crud)

# Summarize task agent
response = agent_service.pip_summarize_task_agent(goal="Your goal here", results=["result1", "result2"])
print(response)
```

### Chatting with agent

```python
from reworkd_platform.web.api.agent.agent_service.open_ai_agent_service import OpenAIAgentService
from reworkd_platform.web.api.agent.model_factory import WrappedChatOpenAI
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.services.tokenizer.token_service import TokenService
from reworkd_platform.db.crud.oauth import OAuthCrud
from reworkd_platform.schemas.user import UserBase

# Initialize the OpenAIAgentService
model = WrappedChatOpenAI(model_name="gpt-3.5-turbo")
settings = ModelSettings(language="en")
token_service = TokenService.create()
callbacks = None
user = UserBase(id=1, name="John Doe")
oauth_crud = OAuthCrud()

agent_service = OpenAIAgentService(model, settings, token_service, callbacks, user, oauth_crud)

# Chat with agent
response = agent_service.pip_chat(message="Your message here", results=["result1", "result2"])
print(response)
```

## Using ollama

The `reworkd_platform` package also provides support for `ollama`. Here are some examples:

### Adding ollama as a dependency

To add `ollama` as a dependency, include it in your `pyproject.toml` file under `[tool.poetry.dependencies]`:

```toml
[tool.poetry.dependencies]
ollama = "^0.1.0"
```

### Installing ollama in Docker

To install `ollama` in the Docker image, add the following command to your `Dockerfile`:

```dockerfile
# Install ollama
RUN pip install ollama
```

### Using ollama in your code

To use `ollama` in your code, you can import it as follows:

```python
import ollama

# Example usage
response = ollama.chat(
    model="llama3.2",
    messages=[{"role": "user", "content": "Your prompt here"}],
)
print(response["message"]["content"])
```

## Using Python 3.10

The `reworkd_platform` package is compatible with Python 3.10. Here are some examples:

### Specifying Python 3.10 in `pyproject.toml`

To specify Python 3.10 as the required version, include the following in your `pyproject.toml` file:

```toml
[tool.poetry.dependencies]
python = "^3.10"
```

### Using Python 3.10 in Docker

To use Python 3.10 in the Docker image, update the base image in your `Dockerfile`:

```dockerfile
FROM python:3.10-slim-buster as prod
```

### Running the project with Python 3.10

To run the project with Python 3.10, make sure you have Python 3.10 installed on your system. You can download and install Python 3.10 from the official Python website: https://www.python.org/downloads/release/python-3100/

Once you have Python 3.10 installed, you can create a virtual environment and install the dependencies using Poetry:

```bash
python3.10 -m venv venv
source venv/bin/activate
poetry install
poetry run python -m reworkd_platform
```

This will start the server on the configured host using Python 3.10.
7 changes: 4 additions & 3 deletions platform/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ maintainers = [
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.11"
python = "^3.10"
fastapi = "^0.98.0"
boto3 = "^1.28.51"
uvicorn = { version = "^0.22.0", extras = ["standard"] }
Expand All @@ -41,6 +41,7 @@ botocore = "^1.31.51"
stripe = "^5.5.0"
cryptography = "^41.0.4"
httpx = "^0.25.0"
ollama = "^0.1.0"


[tool.poetry.dev-dependencies]
Expand Down Expand Up @@ -96,5 +97,5 @@ env = [
]

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
requires = ["poetry-core>=1.0.0", "setuptools", "wheel"]
build-backend = "setuptools.build_meta"
3 changes: 3 additions & 0 deletions platform/reworkd_platform/db/crud/oauth.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,9 @@


class OAuthCrud(BaseCrud):
def __init__(self, session: AsyncSession):
super().__init__(session)

@classmethod
async def inject(
cls,
Expand Down
4 changes: 4 additions & 0 deletions platform/reworkd_platform/db/crud/user.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,13 @@
from reworkd_platform.db.crud.base import BaseCrud
from reworkd_platform.db.models.auth import OrganizationUser
from reworkd_platform.db.models.user import UserSession
from sqlalchemy.ext.asyncio import AsyncSession


class UserCrud(BaseCrud):
def __init__(self, session: AsyncSession):
super().__init__(session)

async def get_user_session(self, token: str) -> UserSession:
query = (
select(UserSession)
Expand Down
2 changes: 2 additions & 0 deletions platform/reworkd_platform/schemas/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-4",
"llama3.2",
]
Loop_Step = Literal[
"start",
Expand All @@ -22,6 +23,7 @@
"gpt-3.5-turbo": 4000,
"gpt-3.5-turbo-16k": 16000,
"gpt-4": 8000,
"llama3.2": 4000,
}


Expand Down
Loading