Skip to content

Commit

Permalink
b2 (#204)
Browse files Browse the repository at this point in the history
  • Loading branch information
gyliu513 committed Sep 13, 2024
1 parent 54582a3 commit 1a29725
Show file tree
Hide file tree
Showing 3 changed files with 146 additions and 4 deletions.
128 changes: 128 additions & 0 deletions aws/react-bedrock1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,128 @@
# This code is Apache 2 licensed:
# https://www.apache.org/licenses/LICENSE-2.0

from dotenv import load_dotenv
load_dotenv()

import boto3
import json

import re
import httpx

class ChatBot:
    """Minimal ReAct-style chat wrapper around an Amazon Bedrock Titan text model.

    The conversation is kept as one newline-separated transcript string because
    Titan's text API takes a single flat ``inputText`` prompt rather than a
    structured message list.
    """

    def __init__(self, system="", *, region_name="us-west-2",
                 model_id="amazon.titan-text-express-v1", max_token_count=70):
        """Create the Bedrock runtime client and seed the transcript.

        Args:
            system: Optional system prompt prepended to the transcript.
            region_name: AWS region for the bedrock-runtime client.
            model_id: Bedrock model identifier to invoke.
            max_token_count: Generation cap forwarded to the model.
        """
        self.client = boto3.client(service_name="bedrock-runtime", region_name=region_name)
        self.model_id = model_id
        self.max_token_count = max_token_count
        self.system = system
        self.messages = ""
        if self.system:
            self.messages = f"system: {system}\n"

    def __call__(self, message):
        """Append *message* as a user turn, invoke the model, record and return its reply."""
        self.messages = f"{self.messages}user: {message}\n"
        print("before call >>>>>>", self.messages)
        result = self.execute()
        self.messages = f"{self.messages}assistant: {result}\n"
        print("after call >>>>>>", self.messages)

        return result

    def execute(self):
        """Send the accumulated transcript to Bedrock and return the generated text."""
        body = json.dumps({
            "inputText": self.messages,
            "textGenerationConfig": {
                "maxTokenCount": self.max_token_count,
                "stopSequences": [],  # phrases that signal the model to conclude generation
                "temperature": 0,     # 0 => minimal randomness, most predictable output
                "topP": 0.9,          # nucleus sampling from the most probable tokens
            }
        })

        response = self.client.invoke_model(
            body=body,
            modelId=self.model_id,
            accept="application/json",
            contentType="application/json"
        )

        response_body = json.loads(response.get('body').read())
        return response_body.get('results')[0].get('outputText')

# ReAct instruction prompt: the model loops Thought -> Action -> PAUSE, and
# the harness feeds each Action's result back as an Observation line.
prompt = """
You run in a loop of Thought, Action, PAUSE, Observation.
At the end of the loop you output an Answer
Use Thought to describe your thoughts about the question you have been asked.
Use Action to run one of the actions available to you - then return PAUSE.
Observation will be the result of running those actions.
Your available actions are:
calculate:
e.g. calculate: 4 * 7 / 3
Runs a calculation and returns the number - uses Python so be sure to use floating point syntax if necessary
wikipedia:
e.g. wikipedia: LLM
Returns a summary from searching Wikipedia
Example session:
Question: What is the capital of Hebei?
Thought: I should look up Hebei on Wikipedia
Action: wikipedia: Hebei
PAUSE
You will be called again with this:
Observation: Hebei is a province in China. The capital is Shijiazhuang.
You then output:
Answer: The capital of Hebei is Shijiazhuang
""".strip()


# Matches lines like "Action: wikipedia: Hebei" -> groups (action_name, action_input).
# Raw string fixes the invalid "\w" escape (a SyntaxWarning on Python 3.12+).
action_re = re.compile(r'^Action: (\w+): (.*)$')

def query(question, max_turns=3):
    """Drive the ReAct loop for *question* and return the model's final output.

    Each turn sends the pending prompt to the bot; if the reply contains an
    ``Action:`` line the named tool runs and its result is fed back as an
    Observation, otherwise the reply is treated as the final answer.

    Args:
        question: The user question that starts the loop.
        max_turns: Upper bound on model calls before giving up.

    Returns:
        The last model reply (the answer), or None if no turn ran.

    Raises:
        Exception: If the model requests an action not in ``known_actions``.
    """
    bot = ChatBot(prompt)
    next_prompt = question
    result = None
    for _ in range(max_turns):
        result = bot(next_prompt)
        # Match each line once (the original matched every line twice).
        actions = [m for m in (action_re.match(line) for line in result.split('\n')) if m]
        if not actions:
            # No further action requested: the model has produced its answer.
            return result
        action, action_input = actions[0].groups()
        if action not in known_actions:
            raise Exception("Unknown action: {}: {}".format(action, action_input))
        print(" -- running {} {}".format(action, action_input))
        observation = known_actions[action](action_input)
        print("Observation:", observation)
        next_prompt = "Observation: {}".format(observation)
    return result


def wikipedia(q):
    """Return the snippet of the top English-Wikipedia search result for *q*."""
    params = {
        "action": "query",
        "list": "search",
        "srsearch": q,
        "format": "json",
    }
    response = httpx.get("https://en.wikipedia.org/w/api.php", params=params)
    hits = response.json()["query"]["search"]
    return hits[0]["snippet"]


def calculate(what):
    """Evaluate an arithmetic expression string and return its value.

    SECURITY NOTE: *what* comes from model output, i.e. untrusted text.
    Builtins are stripped so plain attacks like ``__import__('os')`` fail,
    but eval of untrusted input is still not fully safe — consider
    replacing with an ast-based arithmetic evaluator.
    """
    return eval(what, {"__builtins__": {}}, {})

# Dispatch table: maps an Action name emitted by the model to its handler.
known_actions = {
    "wikipedia": wikipedia,
    "calculate": calculate,
}

query("What is the capital of Hebei")  # fixed typo: "captical" -> "capital"
15 changes: 15 additions & 0 deletions openai/react-openai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
from dotenv import load_dotenv
load_dotenv()

from openai import OpenAI
client = OpenAI()

# o1-series reasoning models reject "system"-role messages (the API returns
# an unsupported-value error), so the persona instructions are folded into
# the single user turn instead.
completion = client.chat.completions.create(
    model="o1-preview-2024-09-12",
    messages=[
        {
            "role": "user",
            "content": (
                "You are a poetic assistant, skilled in explaining complex "
                "programming concepts with creative flair.\n\n"
                "Compose a poem that explains the concept of recursion in programming."
            ),
        }
    ]
)

print(completion)
7 changes: 3 additions & 4 deletions react/react1.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,15 +20,14 @@ def __init__(self, system=""):

def __call__(self, message):
    """Append *message* as a user turn, query the model, record and return the reply."""
    self.messages.append({"role": "user", "content": message})
    print(self.messages)  # debug: full transcript before the API call
    result = self.execute()
    self.messages.append({"role": "assistant", "content": result})
    print("after call >>>>>> ", self.messages)  # debug: transcript including the reply
    return result

def execute(self):
    """Send the accumulated message list to the chat-completions API and return the reply text."""
    completion = self.client.chat.completions.create(model="gpt-3.5-turbo", messages=self.messages)
    # Uncomment this to print out token usage each time, e.g.
    # {"completion_tokens": 86, "prompt_tokens": 26, "total_tokens": 112}
    # print(completion.usage)
    return completion.choices[0].message.content

prompt = """
Expand Down Expand Up @@ -74,7 +73,7 @@ def query(question, max_turns=5):
while i < max_turns:
i += 1
result = bot(next_prompt)
print(result)
# print(result)
actions = [action_re.match(a) for a in result.split('\n') if action_re.match(a)]
if actions:
# There is an action to run
Expand Down

0 comments on commit 1a29725

Please sign in to comment.