Skip to content

Commit

Permalink
Merge pull request #135 from dbpunk-labs/133-set-the-user-octogen-for…
Browse files Browse the repository at this point in the history
…-the-container

feat: boot octogen agent service with user octogen
  • Loading branch information
imotai authored Oct 8, 2023
2 parents 9a094a2 + 579feaf commit 566b897
Show file tree
Hide file tree
Showing 8 changed files with 115 additions and 24 deletions.
72 changes: 72 additions & 0 deletions agent/src/og_agent/codellama_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,35 @@ async def handle_show_sample_code(
)
)

async def handle_install_package(self, json_response, queue, context, task_context):
    """Install the python package requested by the model.

    Builds a ``!pip install <package>`` snippet from the model's
    ``action_input``, announces the action on *queue*, runs the snippet
    through ``self.call_function`` while streaming intermediate responds,
    and returns the last execution result (``None`` when the client
    cancelled before any result arrived).
    """
    package = json_response["action_input"]
    explanation = json_response["explanation"]
    saved_filenames = json_response.get("saved_filenames", [])
    # NOTE(review): `package` comes straight from model output and is
    # interpolated unquoted into a shell-escape command — confirm it is
    # validated upstream before trusting it.
    code = f"!pip install {package}"
    tool_input = json.dumps({
        "code": code,
        "explanation": explanation,
        "saved_filenames": saved_filenames,
    })
    action = OnAgentAction(input=tool_input, tool="execute_python_code")
    await queue.put(
        TaskRespond(
            state=task_context.to_task_state_proto(),
            respond_type=TaskRespond.OnAgentActionType,
            on_agent_action=action,
        )
    )
    function_result = None
    async for result, respond in self.call_function(code, context, task_context):
        if context.done():
            # the client went away; stop consuming execution output
            logger.debug("the client has cancelled the request")
            break
        function_result = result
        if respond:
            await queue.put(respond)
    return function_result

async def handle_function(self, json_response, queue, context, task_context):
code = json_response["action_input"]
explanation = json_response["explanation"]
Expand Down Expand Up @@ -239,6 +268,7 @@ async def arun(self, question, queue, context, max_iteration=5):
)
break
except Exception as ex:
logger.exception(f"fail to load message the message is {message}")
await queue.put(
TaskRespond(
state=task_context.to_task_state_proto(),
Expand Down Expand Up @@ -307,6 +337,48 @@ async def arun(self, question, queue, context, max_iteration=5):
)
)
break
elif (
json_response["action"] == "install_python_package"
and json_response["action_input"]
):
function_result = await self.handle_install_package(
json_response, queue, context, task_context
)
logger.debug(f"the function result {function_result}")
await queue.put(
TaskRespond(
state=task_context.to_task_state_proto(),
respond_type=TaskRespond.OnAgentActionEndType,
on_agent_action_end=OnAgentActionEnd(
output="",
output_files=function_result.saved_filenames,
has_error=function_result.has_error,
),
)
)
history.append("User:%s" % current_question)
history.append("Octogen:%s" % message)
ins = "the output of install_python_package:"
# TODO limit the output size
if function_result.has_result:
current_question = f"{ins}\n{function_result.console_stdout}"
logger.debug(
"continue to iterate with codellama with question %s"
% function_result.console_stdout
)
elif function_result.has_error:
current_question = f"{ins} \n {function_result.console_stderr}"
logger.debug(
"continue to iterate with codellama with question %s"
% function_result.console_stderr
)
else:
current_question = f"{ins} \n {function_result.console_stdout}"
logger.debug(
"continue to iterate with codellama with question %s"
% function_result.console_stdout
)

else:
result = self._format_output(json_response)
await queue.put(
Expand Down
2 changes: 1 addition & 1 deletion agent/src/og_agent/codellama_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ async def prompt(self, user_input, temperature=0, chat_history=""):
{self.ai_name}:"""
logging.info(f"{prompt}")
data = {
"n_predict": 500,
"n_predict": 1024,
"grammar": self.grammar,
"prompt": prompt,
"temperature": temperature,
Expand Down
5 changes: 3 additions & 2 deletions agent/src/og_agent/prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,19 +48,20 @@
* To achieve your goal, write a plan, execute it step-by-step, and set `is_final_answer` to `true` for the last step.
* Every step must include an action with the explanation, the code block
* Ensure that the output of action meets the goal before providing the final answer.
* Try a new step if the output does not meet the goal
* Try a new step if the output does not meet the goal.
* Your code should produce output in Markdown format. For instance, if you're using a Pandas DataFrame to display data, make sure to utilize the to_markdown function.
Thirdly, the following actions are available:
* execute_python_code: This action executes Python code and returns the output. You must verify the output before giving the final answer.
* install_python_package: This action installs the Python package
* show_sample_code: This action shows the sample code to the user. You must set the sample code to action_input
* no_action: This action does nothing.
Fourthly, the output format must be a JSON format with the following fields:
* explanation (string): The explanation about the action input
* action (string): The name of the action.
* action_input (string): The sample code or python code to be executed for the action or an empty string if no action is specified
* action_input (string): The sample code, python code, or python package to be used for the action, or an empty string if no action is specified
* saved_filenames (list of strings): A list of filenames that were created by the action input.
* language (string): The programming language used to execute the action.
* is_final_answer (boolean): Whether this is the final answer to the question. If it is, the value of this field should be true. Otherwise, the value should be false.
Expand Down
6 changes: 1 addition & 5 deletions chat/src/og_terminal/terminal_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -189,11 +189,7 @@ def handle_action_output(segments, respond, values):
if new_stderr:
total_output = new_stdout + "\n" + new_stderr
text = Text.from_ansi(total_output)
syntax = Syntax(
f"{text.plain}",
"text",
line_numbers=True,
)
syntax = Syntax(f"{text.plain}", "text")
segments.append((len(values) - 1, segment[1], syntax))


Expand Down
3 changes: 2 additions & 1 deletion docker/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
FROM ubuntu
FROM ubuntu:22.04
RUN apt update
RUN apt install -y python3 python3-pip
RUN pip install -U og_proto og_sdk og_agent og_kernel hapless
RUN adduser octogen
ADD server /bin/
ADD start_all.sh /bin/
ADD add_endpoint.sh /bin/
7 changes: 5 additions & 2 deletions docker/start_all.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#! /bin/sh
#
# start_all.sh
# Copyright (C) 2023 ubuntu <ubuntu@ip-172-31-29-132>
# Copyright (C) 2023 imotai
#
# Distributed under terms of the MIT license.
#
Expand All @@ -11,9 +11,10 @@ then
echo "No arguments supplied"
exit 1
fi

ROOT_DIR=$1

chown octogen:octogen -R ${ROOT_DIR}
cat <<EOF> /bin/start_service.sh
if [ "$2" -eq 1 ]
then
if [ -z "$3" ]
Expand All @@ -38,3 +39,5 @@ do
hap status
sleep 10
done
EOF
su - octogen -c "bash /bin/start_service.sh"
24 changes: 14 additions & 10 deletions sdk/src/og_sdk/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,24 +44,28 @@ def process_char_stream(stream):
i = 0

def carriage_return(buf):
    """Simulate a carriage return on *buf* (a list of characters).

    Pops every character after the last ``"\\n"`` in *buf* — the text a
    subsequent write would overwrite — and returns those characters in
    their original order. Mutates *buf* in place. Returns ``[]`` when
    *buf* contains no newline (nothing is removed).
    """
    # Bind pop_buf before the branch so the no-newline path still
    # returns a list instead of raising / returning None.
    pop_buf = []
    if "\n" in buf:
        # chars after the last "\n" == index of "\n" in the reversed list
        for _ in range(buf[::-1].index("\n")):
            pop_buf.append(buf.pop())
    return pop_buf[::-1]

last_pop_buf = []
while i < len(stream):
c = stream[i]
if c in ["\b", "\r"]:
# Handle escape characters
escape_dict = {
"\b": lambda buf: buf.pop() if buf else None, # backspace
"\r": carriage_return,
}
escape_dict[c](buffer)
i += 1
if c == "\b":
if buffer:
buffer.pop()
elif c == "\r":
last_pop_buf = carriage_return(buffer)
elif c == "\n":
if last_pop_buf:
buffer.extend(last_pop_buf)
last_pop_buf = []
buffer.append(c)
else:
buffer.append(c)
i += 1
i += 1
return "".join(buffer)


Expand Down
20 changes: 17 additions & 3 deletions sdk/tests/utils_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,25 +17,38 @@ def test_process_char_stream():
output1 = process_char_stream(stream0 + stream1)
output2 = process_char_stream(output1 + stream1)
assert output1 == output2
final_stream = "\r1.1 MB 100%\r\n"
output3 = process_char_stream(output2 + final_stream)
final_ouput_expected = (
" Downloading pyfiglet-1.0.2-py3-none-any.whl (1.1 MB)\n1.1 MB 100%\n"
)
assert final_ouput_expected == output3


def test_empty_string():
    """An empty stream yields an empty result."""
    result = process_char_stream("")
    assert result == ""


def test_single_character():
    """A lone printable character passes through unchanged."""
    result = process_char_stream("a")
    assert result == "a"


def test_multiple_characters():
    """A plain run of printable characters passes through unchanged."""
    stream = "abc"
    assert process_char_stream(stream) == stream


def test_backspace():
    """A backspace erases the preceding character."""
    result = process_char_stream("ab\b")
    assert result == "a"


def test_carriage_return():
    """A trailing carriage return with no newline leaves the text intact."""
    result = process_char_stream("ab\r")
    assert result == "ab"


def test_carriage_return_with_newline():
    """A "\\r\\n" pair collapses to a plain newline after the text."""
    result = process_char_stream("ab\r\n")
    assert result == "ab\n"


def test_backspace_and_carriage_return():
    """Backspace applies first, then the carriage return leaves the rest."""
    result = process_char_stream("ab\b\r")
    assert result == "a"

Expand All @@ -49,6 +62,7 @@ def test_mixed_escape_characters_and_regular_characters():


def test_special_characters():
    """Punctuation and shell-style special characters are preserved verbatim."""
    # The scraped block contained the same assertion twice (an old
    # single-line form and the reformatted one); keep a single assertion.
    stream = "ab!@#$%^&*()_+{}|:\";'<>,.?/`~"
    assert process_char_stream(stream) == stream

0 comments on commit 566b897

Please sign in to comment.