Skip to content

Commit

Permalink
Update app.py
Browse files — browse the repository at this point in the history
  • Loading branch information
crossmodaldebate authored Sep 21, 2024
1 parent 49d6aa0 commit 68c5b2d
Showing 1 changed file with 49 additions and 88 deletions.
137 changes: 49 additions & 88 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,115 +3,76 @@
import os
import json
import time
import hypernetx as hnx  # hypergraph construction and manipulation library

client = groq.Groq()

def make_api_call(messages, max_tokens, is_final_answer=False):
    """Call the Groq chat-completions endpoint and return the parsed JSON reply.

    Up to three attempts are made; transient failures (including a reply
    that is not valid JSON) trigger a one-second pause and a retry.  After
    the last failure a structured error payload is returned instead of
    raising, so callers always receive a dict.

    Args:
        messages: Chat history in OpenAI-style message-dict format.
        max_tokens: Token budget for the completion.
        is_final_answer: Selects which error payload shape to return on
            total failure (the step payload carries a ``next_action`` key).

    Returns:
        The model's JSON response parsed into a dict, or an error dict.
    """
    max_attempts = 3
    for attempt in range(max_attempts):
        try:
            completion = client.chat.completions.create(
                model="llama-3.1-70b-versatile",
                messages=messages,
                max_tokens=max_tokens,
                temperature=0.2,
                response_format={"type": "json_object"},
            )
            # json.loads stays inside the try: an unparsable reply also retries.
            return json.loads(completion.choices[0].message.content)
        except Exception as e:
            if attempt < max_attempts - 1:
                time.sleep(1)  # brief pause before the next attempt
                continue
            # Out of retries: surface the failure as a structured payload.
            if is_final_answer:
                return {"title": "Error", "content": f"Failed to generate final answer after 3 attempts. Error: {str(e)}"}
            return {"title": "Error", "content": f"Failed to generate step after 3 attempts. Error: {str(e)}", "next_action": "final_answer"}
# Build a hypergraph from the prompt text.
def build_hypergraph(input_data):
    """Tokenize *input_data* and assemble a hypernetx hypergraph.

    Extracted entities become nodes; each extracted relation becomes a
    hyperedge connecting the entities it links, labeled with the
    relation's type.

    NOTE(review): relies on a module-level ``oolama`` object that is not
    imported anywhere in this file — confirm where it is defined.

    Args:
        input_data: Raw prompt text to analyze.

    Returns:
        An ``hnx.Hypergraph`` with entity nodes and relation hyperedges.
    """
    tokens = oolama.tokenize(input_data)          # tokenize the raw prompt
    entities = oolama.extract_entities(tokens)    # entity extraction
    relations = oolama.extract_relations(tokens)  # relation extraction

    hypergraph = hnx.Hypergraph()

    # Entities become the hypergraph's nodes.
    for entity in entities:
        hypergraph.add_node(entity)

    # Each relation becomes a hyperedge over the entities it mentions.
    for relation in relations:
        hypergraph.add_edge(relation['entities'], relation['type'])

    return hypergraph

# Run inference over the hypergraph.
def infer_on_hypergraph(hypergraph):
    """Convert *hypergraph* to Oolama's input format and run inference.

    NOTE(review): both ``hnx.convert_to_oolama_format`` and ``oolama.infer``
    are opaque here — their contract is assumed, not shown in this file.

    Args:
        hypergraph: An ``hnx.Hypergraph`` produced by ``build_hypergraph``.

    Returns:
        Raw inference results as produced by ``oolama.infer``.
    """
    oolama_input = hnx.convert_to_oolama_format(hypergraph)
    return oolama.infer(oolama_input)
# Format inference results for display.
def format_hypergraph_inference_results(results):
    """Return *results* rendered for display via ``oolama.format_results``."""
    return oolama.format_results(results)

# Generate the answer for a prompt via the hypergraph pipeline.
def generate_hypergraph_response(prompt):
    """Run the full pipeline: build a hypergraph from *prompt*, infer on
    it, and return the formatted results.

    Args:
        prompt: The user's query text.

    Returns:
        The display-ready inference results.
    """
    # Build the initial hypergraph from the prompt.
    hypergraph = build_hypergraph(prompt)

    # Run inference over the hypergraph.
    results = infer_on_hypergraph(hypergraph)

    # Format the results for display.
    formatted_results = format_hypergraph_inference_results(results)

    return formatted_results

def main():
    """Streamlit entry point: collect a query and show the hypergraph-based answer."""
    st.set_page_config(page_title="g1 prototype", page_icon="🧠", layout="wide")

    st.title("g1: Usando Llama-3.1 70b no Groq com Cadeias de Raciocínio Baseadas em Arquitetura de Hipergrafos")

    st.markdown("""
    Este é um protótipo inicial que utiliza métodos heurísticos e hermenêuticos com uma arquitetura de hipergrafos para melhorar a precisão das saídas.
    """)

    # Text box for the user's query.
    user_query = st.text_input("Insira sua consulta:", placeholder="Por exemplo, Quantos 'R's existem na palavra morango?")

    if user_query:
        st.write("Gerando resposta com base na arquitetura de hipergrafos...")

        # Generate the answer through the hypergraph pipeline.
        response = generate_hypergraph_response(user_query)

        # Display the formatted answer.
        st.write(response)

if __name__ == "__main__":
    main()

0 comments on commit 68c5b2d

Please sign in to comment.