From 1a29725a0c3864dfa8c76cbd814b1025e44e3baa Mon Sep 17 00:00:00 2001
From: Guangya Liu
Date: Thu, 12 Sep 2024 22:19:50 -0400
Subject: [PATCH] b2 (#204)

---
 aws/react-bedrock1.py  | 128 +++++++++++++++++++++++++++++++++++++++++
 openai/react-openai.py |  15 +++++
 react/react1.py        |   7 +--
 3 files changed, 146 insertions(+), 4 deletions(-)
 create mode 100644 aws/react-bedrock1.py
 create mode 100644 openai/react-openai.py

diff --git a/aws/react-bedrock1.py b/aws/react-bedrock1.py
new file mode 100644
index 0000000..8839069
--- /dev/null
+++ b/aws/react-bedrock1.py
@@ -0,0 +1,128 @@
+# This code is Apache 2 licensed:
+# https://www.apache.org/licenses/LICENSE-2.0
+
+from dotenv import load_dotenv
+load_dotenv()
+
+import boto3
+import json
+
+import re
+import httpx
+
+class ChatBot:
+    def __init__(self, system=""):
+        self.client = boto3.client(service_name="bedrock-runtime", region_name="us-west-2")
+        self.system = system
+        self.messages = ""
+        if self.system:
+            # self.messages.append({"role": "system", "content": system})
+            self.messages = f"system: {system}\n"
+
+    def __call__(self, message):
+        self.messages = f"{self.messages}user: {message}\n"
+        print("before call >>>>>>", self.messages)
+        result = self.execute()
+        self.messages = f"{self.messages}assistant: {result}\n"
+        print("after call >>>>>>", self.messages)
+
+        return result
+
+    def execute(self):
+        body = json.dumps({
+            "inputText": self.messages,
+            "textGenerationConfig": {
+                "maxTokenCount": 70,
+                "stopSequences": [],  # Phrases that signal the model to conclude text generation.
+                "temperature": 0,  # Temperature controls randomness; higher values increase diversity, lower values boost predictability.
+                "topP": 0.9  # Top-p sampling draws from the most probable tokens in the distribution.
+            }
+        })
+
+        response = self.client.invoke_model(
+            body=body,
+            modelId="amazon.titan-text-express-v1",
+            accept="application/json",
+            contentType="application/json"
+        )
+
+        response_body = json.loads(response.get('body').read())
+        outputText = response_body.get('results')[0].get('outputText')
+        return outputText
+
+prompt = """
+You run in a loop of Thought, Action, PAUSE, Observation.
+At the end of the loop you output an Answer.
+Use Thought to describe your thoughts about the question you have been asked.
+Use Action to run one of the actions available to you - then return PAUSE.
+Observation will be the result of running those actions.
+
+Your available actions are:
+
+calculate:
+e.g. calculate: 4 * 7 / 3
+Runs a calculation and returns the number - uses Python so be sure to use floating point syntax if necessary
+
+wikipedia:
+e.g. wikipedia: LLM
+Returns a summary from searching Wikipedia
+
+Example session:
+
+Question: What is the capital of Hebei?
+Thought: I should look up Hebei on Wikipedia
+Action: wikipedia: Hebei
+PAUSE
+
+You will be called again with this:
+
+Observation: Hebei is a province in China. The capital is Shijiazhuang.
+
+You then output:
+
+Answer: The capital of Hebei is Shijiazhuang
+""".strip()
+
+
+action_re = re.compile(r'^Action: (\w+): (.*)$')
+
+def query(question, max_turns=3):
+    i = 0
+    bot = ChatBot(prompt)
+    next_prompt = question
+    while i < max_turns:
+        i += 1
+        result = bot(next_prompt)
+        # print(result)
+        actions = [action_re.match(a) for a in result.split('\n') if action_re.match(a)]
+        if actions:
+            # There is an action to run
+            action, action_input = actions[0].groups()
+            if action not in known_actions:
+                raise Exception("Unknown action: {}: {}".format(action, action_input))
+            print(" -- running {} {}".format(action, action_input))
+            observation = known_actions[action](action_input)
+            print("Observation:", observation)
+            next_prompt = "Observation: {}".format(observation)
+        else:
+            return
+
+
+def wikipedia(q):
+    return httpx.get("https://en.wikipedia.org/w/api.php", params={
+        "action": "query",
+        "list": "search",
+        "srsearch": q,
+        "format": "json"
+    }).json()["query"]["search"][0]["snippet"]
+
+
+def calculate(what):
+    return eval(what)  # NOTE: eval runs arbitrary code; only safe on trusted input.
+
+known_actions = {
+    "wikipedia": wikipedia,
+    "calculate": calculate,
+}
+
+query("What is the capital of Hebei")
diff --git a/openai/react-openai.py b/openai/react-openai.py
new file mode 100644
index 0000000..c177cae
--- /dev/null
+++ b/openai/react-openai.py
@@ -0,0 +1,15 @@
+from dotenv import load_dotenv
+load_dotenv()
+
+from openai import OpenAI
+client = OpenAI()
+
+completion = client.chat.completions.create(
+    model="o1-preview-2024-09-12",
+    messages=[
+        {"role": "system", "content": "You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."},
+        {"role": "user", "content": "Compose a poem that explains the concept of recursion in programming."}
+    ]
+)
+
+print(completion)
\ No newline at end of file
diff --git a/react/react1.py b/react/react1.py
index f18e4fb..5f9b8bb 100644
--- a/react/react1.py
+++ b/react/react1.py
@@ -20,15 +20,14 @@ def __init__(self, system=""):
 
     def __call__(self, message):
         self.messages.append({"role": "user", "content": message})
+        print(self.messages)
         result = self.execute()
         self.messages.append({"role": "assistant", "content": result})
+        print("after call >>>>>> ", self.messages)
         return result
 
     def execute(self):
         completion = self.client.chat.completions.create(model="gpt-3.5-turbo", messages=self.messages)
-        # Uncomment this to print out token usage each time, e.g.
-        # {"completion_tokens": 86, "prompt_tokens": 26, "total_tokens": 112}
-        # print(completion.usage)
         return completion.choices[0].message.content
 
 prompt = """
@@ -74,7 +73,7 @@ def query(question, max_turns=5):
     while i < max_turns:
         i += 1
         result = bot(next_prompt)
-        print(result)
+        # print(result)
         actions = [action_re.match(a) for a in result.split('\n') if action_re.match(a)]
         if actions:
             # There is an action to run
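
Note (not part of the patch): the calculate action in aws/react-bedrock1.py passes model-generated text straight to eval(), which is fine for a demo but executes arbitrary code. A minimal sketch of a whitelist-based alternative using Python's ast module, assuming only basic arithmetic needs to be supported (safe_calculate and _OPS are illustrative names, not in the repo):

import ast
import operator

# Map AST operator node types to their arithmetic implementations.
_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.Pow: operator.pow,
    ast.USub: operator.neg,
}

def safe_calculate(what):
    # Parse the expression and walk the tree, allowing only numeric
    # literals and the operators whitelisted above.
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError("Unsupported expression: {!r}".format(what))
    return _eval(ast.parse(what, mode="eval"))

# e.g. safe_calculate("4 * 7 / 3") returns 9.333..., while
# safe_calculate("__import__('os')") raises ValueError instead of importing os.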