Skip to content

Commit

Permalink
feat: added example of usage
Browse files Browse the repository at this point in the history
  • Loading branch information
andrewBatutin committed Sep 22, 2024
1 parent 47bffc0 commit ab7b92b
Show file tree
Hide file tree
Showing 3 changed files with 54 additions and 9 deletions.
11 changes: 10 additions & 1 deletion llama-index-packs/llama-index-packs-ersatz-o1/README.md
Original file line number Diff line number Diff line change
@@ -1 +1,10 @@
# LlamaIndex Packs Integration: Ersatz_O1
# ErsatzO1 Query Engine.

This module implements the ErsatzO1 approach, combining Chain of Thought (CoT) with
the re-reading technique and self-consistency prompting for generic text input.

The approach is adapted from the concepts presented in various papers on prompting techniques.

## Sources
- [Re-Reading Improves Reasoning in Large Language Models](https://arxiv.org/pdf/2309.06275)
- [SELF-CONSISTENCY IMPROVES CHAIN OF THOUGHT REASONING IN LANGUAGE MODELS](https://arxiv.org/pdf/2203.11171)
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
"""Example: answer a word problem with the ErsatzO1 query engine."""

import asyncio  # NOTE(review): unused in this example — presumably for an async variant; confirm

from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI

from llama_index.packs.ersatz_o1 import ErsatzO1QueryEngine

# LLM that generates each reasoning path.
llm = OpenAI(model="gpt-4-turbo")

# Background facts the engine reasons over.
task_context = """
Every day, Wendi feeds each of her chickens three cups of mixed chicken feed, containing seeds, mealworms and vegetables
to help keep them healthy. She gives the chickens their feed in three separate meals. In the morning, she gives her flock of
chickens 15 cups of feed. In the afternoon, she gives her chickens another 25 cups of feed.
"""

# Engine samples 5 independent reasoning paths over the context.
query_engine = ErsatzO1QueryEngine(
    context=task_context,
    llm=llm,
    reasoning_paths=5,
    verbose=True,
)


def main() -> None:
    """Run the example query and print the engine's answer."""
    answer = query_engine.query(
        "How many cups of feed does she need to give her chickens in the final meal of the day if the size of Wendi’s flock is 20 chickens?"
    )
    print(answer)


if __name__ == "__main__":
    main()
Original file line number Diff line number Diff line change
Expand Up @@ -39,17 +39,22 @@
Final Answer: Provide your answer here. Ensure it is as concise as possible, without any explanation.
Confidence: Rate your confidence in the answer from 1 (low) to 5 (high).
Your response should be in the exact format:
<answer>
<confidence_value>
Replace <answer> with your actual answer and <confidence_value> with your actual confidence rating.
"""
text_prompt = PromptTemplate(template=text_prompt_str)


class FinalAnswerOutputParser(ChainableOutputParser):
    """Output parser for the ErsatzO1 approach."""

    def parse(self, output: str) -> Optional[str]:
        """Extract the formatted answer block from an LLM completion.

        The prompt instructs the model to reply with exactly two lines:
        the answer and a confidence rating.

        Args:
            output: Raw LLM completion text.

        Returns:
            The entire stripped output when it contains at least two
            lines, otherwise ``None`` to signal an unparseable response.
        """
        # NOTE(review): removed a leftover early return keyed on a
        # "Final Answer:" marker from the previous prompt format — the
        # current prompt (answer line + confidence line) never emits it,
        # and the stale branch contradicted the two-line contract below.
        lines = output.strip().split("\n")
        if len(lines) >= 2:
            # Return the whole formatted payload; parse_response splits it.
            return output.strip()
        return None

def format(self, query: Any) -> str:
Expand All @@ -58,8 +63,13 @@ def format(self, query: Any) -> str:

def parse_response(response: str) -> Tuple[str, int]:
    """Parse the response from the LLM in the ErsatzO1 approach.

    Expects exactly two lines: the answer on the first line and an
    integer confidence rating (1 = low, 5 = high) on the second.

    Args:
        response: Raw LLM completion text.

    Returns:
        A ``(answer, confidence)`` tuple.

    Raises:
        ValueError: If the stripped response is not exactly two lines,
            or the confidence line is not a valid integer.
    """
    # NOTE(review): removed superseded parsing keyed on "Final Answer:" /
    # "Confidence:" markers — its results were dead (immediately
    # overwritten below) and it raised IndexError whenever the markers
    # were absent from the completion.
    lines = response.strip().split("\n")
    if len(lines) != 2:
        raise ValueError("Response format is incorrect")

    answer = lines[0].strip("'")  # remove surrounding quotes if present
    confidence = int(lines[1].strip())

    return answer, confidence


Expand All @@ -69,7 +79,7 @@ async def async_textual_reasoning(
llm: LLM,
num_paths: int = 5,
verbose: bool = False,
temperature: float = 0.7,
temperature: float = 0.01,
) -> List[Tuple[str, int]]:
"""
Perform asynchronous textual reasoning using the ErsatzO1 approach.
Expand Down

0 comments on commit ab7b92b

Please sign in to comment.