Skip to content

Commit 4bb4006

Browse files
committed
Merge branch 'dev'
Conflicts: codeinterpreterapi/session.py pyproject.toml
2 parents c10cc2c + 977252a commit 4bb4006

21 files changed

+578
-2571
lines changed

.env.example

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,6 @@
1-
OPENAI_API_KEY= # your openai api key (required)
2-
CODEBOX_API_KEY= # your codebox api key (optional, required for production)
3-
VERBOSE=False # set to True to enable verbose logging
1+
# (required)
2+
OPENAI_API_KEY=
3+
# (optional, required for production)
4+
# CODEBOX_API_KEY=
5+
# (set True to enable logging)
6+
VERBOSE=False

README.md

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,12 @@ You can run everything local except the LLM using your own OpenAI API Key.
1919
Get your OpenAI API Key [here](https://platform.openai.com/account/api-keys) and install the package.
2020

2121
```bash
22-
pip install codeinterpreterapi
22+
pip install "codeinterpreterapi[all]"
2323
```
2424

25+
Everything needed for local experiments is installed with the `all` extra.
26+
For deployments, you can use `pip install codeinterpreterapi` instead, which does not install the additional dependencies.
27+
2528
## Usage
2629

2730
Make sure to set the `OPENAI_API_KEY` environment variable (or use a `.env` file)

codeinterpreterapi/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
from codeinterpreterapi.session import CodeInterpreterSession
2-
from codeinterpreterapi.schema import File
2+
from codeinterpreterapi.schema import File
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
from .functions_agent import OpenAIFunctionsAgent
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
# TODO: override some methods of the ConversationalAgent class
2+
# to improve the agent's performance
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
from .modifications_check import get_file_modifications
2+
from .rm_dl_link import remove_download_link
3+
from .extract_code import extract_python_code
Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
import json
2+
from typing import List, Optional
3+
4+
from langchain.base_language import BaseLanguageModel
5+
from langchain.chat_models.openai import ChatOpenAI
6+
from langchain.chat_models.anthropic import ChatAnthropic
7+
from langchain.schema import AIMessage, OutputParserException
8+
9+
# from codeinterpreterapi.prompts import extract_code_prompt
10+
11+
12+
async def extract_python_code(
    text: str,
    llm: "BaseLanguageModel",  # quoted so the def evaluates even if langchain is absent
    retry: int = 2,
) -> Optional[str]:
    """Extract the python code contained in a chat-model response.

    Looks for a markdown code fence (```` ``` ````, ```` ```python ````
    or ```` ```py ````) in *text* and returns its contents.  An
    llm-based fallback for responses without fences is not implemented
    yet (see the commented-out ``extract_code_prompt`` import above).

    Args:
        text: Model output that may contain fenced python code.
        llm: Language model reserved for the fallback extraction path
            (currently unused).
        retry: Remaining attempts; when exhausted the function gives up.

    Returns:
        The extracted code, or ``None`` when no code fence is found.
    """
    import re  # local import keeps the module's top-level dependencies unchanged

    if retry < 1:
        return None
    match = re.search(r"```(?:python|py)?\n(.*?)```", text, re.DOTALL)
    if match:
        return match.group(1).strip()
    # TODO: ask the llm to extract the code when no fence is present.
    return None
18+
19+
20+
async def test():
    """Smoke-test extract_python_code against a sample plotting script."""
    llm = ChatAnthropic(model="claude-1.3")  # type: ignore

    sample = """
import matplotlib.pyplot as plt

x = list(range(1, 11))
y = [29, 39, 23, 32, 4, 43, 43, 23, 43, 77]

plt.plot(x, y, marker='o')
plt.xlabel('Index')
plt.ylabel('Value')
plt.title('Data Plot')

plt.show()
"""

    extracted = await extract_python_code(sample, llm)
    print(extracted)
39+
40+
41+
if __name__ == "__main__":
    # Load .env so the API keys are available, then run the async demo.
    import asyncio

    from dotenv import load_dotenv

    load_dotenv()
    asyncio.run(test())

codeinterpreterapi/chains/modifications_check.py

Lines changed: 32 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,10 @@
33

44
from langchain.base_language import BaseLanguageModel
55
from langchain.chat_models.openai import ChatOpenAI
6+
from langchain.chat_models.anthropic import ChatAnthropic
67
from langchain.schema import AIMessage, OutputParserException
78

8-
from codeinterpreterapi.prompts import determine_modifications_function, determine_modifications_prompt
9+
from codeinterpreterapi.prompts import determine_modifications_prompt
910

1011

1112
async def get_file_modifications(
@@ -15,44 +16,44 @@ async def get_file_modifications(
1516
) -> Optional[List[str]]:
1617
if retry < 1:
1718
return None
18-
messages = determine_modifications_prompt.format_prompt(code=code).to_messages()
19-
message = await llm.apredict_messages(messages, functions=[determine_modifications_function])
2019

21-
if not isinstance(message, AIMessage):
22-
raise OutputParserException("Expected an AIMessage")
20+
prompt = determine_modifications_prompt.format(code=code)
2321

24-
function_call = message.additional_kwargs.get("function_call", None)
22+
result = await llm.apredict(prompt, stop="```")
2523

26-
if function_call is None:
24+
25+
try:
26+
result = json.loads(result)
27+
except json.JSONDecodeError:
28+
result = ""
29+
if not result or not isinstance(result, dict) or "modifications" not in result:
2730
return await get_file_modifications(code, llm, retry=retry - 1)
28-
else:
29-
function_call = json.loads(function_call["arguments"])
30-
return function_call["modifications"]
31-
31+
return result["modifications"]
32+
3233

3334
async def test():
    """Smoke-test get_file_modifications against a sample plotting script."""
    llm = ChatAnthropic(model="claude-1.3")  # type: ignore

    sample = """
import matplotlib.pyplot as plt

x = list(range(1, 11))
y = [29, 39, 23, 32, 4, 43, 43, 23, 43, 77]

plt.plot(x, y, marker='o')
plt.xlabel('Index')
plt.ylabel('Value')
plt.title('Data Plot')

plt.show()
"""

    print(await get_file_modifications(sample, llm))
51-
53+
5254

5355
if __name__ == "__main__":
    # Load .env so the API keys are available, then run the async demo.
    import asyncio

    from dotenv import load_dotenv

    load_dotenv()
    asyncio.run(test())

0 commit comments

Comments
 (0)