diff --git a/examples/agent.py b/examples/agent.py
index fb89bd2b6..ae5727fc8 100644
--- a/examples/agent.py
+++ b/examples/agent.py
@@ -18,11 +18,24 @@
salaries_df = pd.DataFrame(salaries_data)
-llm = OpenAI("OPEN_API")
+llm = OpenAI("YOUR_OPENAI_API_KEY")
agent = Agent([employees_df, salaries_df], config={"llm": llm}, memory_size=10)
+
+# Chat with the agent
response = agent.chat("Who gets paid the most?")
print(response)
-questions = agent.clarification_questions()
-print(questions)
-response = agent.chat("Which department he belongs to?")
+
+
+# Get Clarification Questions
+response = agent.clarification_questions()
+
+if response:
+ for question in response.questions:
+ print(question)
+else:
+ print(response.message)
+
+
+# Explain how the chat response is generated
+response = agent.explain()
print(response)
diff --git a/pandasai/agent/__init__.py b/pandasai/agent/__init__.py
index 4707ee48c..6ae93b42d 100644
--- a/pandasai/agent/__init__.py
+++ b/pandasai/agent/__init__.py
@@ -1,9 +1,12 @@
import json
from typing import Union, List, Optional
+from pandasai.agent.response import ClarificationResponse
from pandasai.helpers.df_info import DataFrameType
from pandasai.helpers.logger import Logger
from pandasai.helpers.memory import Memory
+from pandasai.prompts.base import Prompt
from pandasai.prompts.clarification_questions_prompt import ClarificationQuestionPrompt
+from pandasai.prompts.explain_prompt import ExplainPrompt
from pandasai.schemas.df_config import Config
from pandasai.smart_datalake import SmartDatalake
@@ -23,7 +26,7 @@ def __init__(
dfs: Union[DataFrameType, List[DataFrameType]],
config: Optional[Union[Config, dict]] = None,
logger: Logger = None,
- memory_size=1,
+ memory_size: int = 1,
):
"""
Args:
@@ -36,6 +39,7 @@ def __init__(
self._lake = SmartDatalake(dfs, config, logger)
self.logger = self._lake.logger
+    # Each conversation turn stores two messages (query + answer), so multiply the memory size by 2
self._memory = Memory(memory_size * 2)
def _get_conversation(self):
@@ -51,17 +55,26 @@ def _get_conversation(self):
]
)
- def chat(self, query: str):
+ def chat(self, query: str, output_type: Optional[str] = None):
"""
Simulate a chat interaction with the assistant on Dataframe.
"""
- self._memory.add(query, True)
- conversation = self._get_conversation()
- result = self._lake.chat(query, start_conversation=conversation)
- self._memory.add(result, False)
- return result
+ try:
+ self._memory.add(query, True)
+ conversation = self._get_conversation()
+ result = self._lake.chat(
+ query, output_type=output_type, start_conversation=conversation
+ )
+ self._memory.add(result, False)
+ return result
+ except Exception as exception:
+ return (
+ "Unfortunately, I was not able to get your answers, "
+ "because of the following error:\n"
+ f"\n{exception}\n"
+ )
- def _get_clarification_prompt(self):
+ def _get_clarification_prompt(self) -> Prompt:
"""
Create a clarification prompt with relevant variables.
"""
@@ -70,31 +83,54 @@ def _get_clarification_prompt(self):
prompt.set_var("conversation", self._get_conversation())
return prompt
- def clarification_questions(self):
+ def clarification_questions(self) -> ClarificationResponse:
"""
- Generate and return up to three clarification questions based on a given prompt.
+ Generate clarification questions based on the data
"""
try:
prompt = self._get_clarification_prompt()
- result = self._lake.llm.generate_code(prompt)
- questions = json.loads(result)
+ result = self._lake.llm.call(prompt)
+ self.logger.log(
+ f"""Clarification Questions: {result}
+ """
+ )
+            questions: List[str] = json.loads(result)
+ return ClarificationResponse(
+ success=True, questions=questions[:3], message=result
+ )
except Exception as exception:
- return (
+ return ClarificationResponse(
+ False,
+ [],
"Unfortunately, I was not able to get your clarification questions, "
"because of the following error:\n"
- f"\n{exception}\n"
+ f"\n{exception}\n",
)
- return questions[:3]
-
- def start_new_conversation(self):
+    def start_new_conversation(self) -> bool:
"""
Clears the previous conversation
"""
+
self._memory.clear()
+ return True
- def explain(self):
+ def explain(self) -> str:
"""
Returns the explanation of the code how it reached to the solution
"""
- pass
+ try:
+ prompt = ExplainPrompt()
+ prompt.set_var("code", self._lake.last_code_executed)
+ response = self._lake.llm.call(prompt)
+ self.logger.log(
+            f"""Explanation: {response}
+ """
+ )
+ return response
+ except Exception as exception:
+ return (
+ "Unfortunately, I was not able to explain, "
+ "because of the following error:\n"
+ f"\n{exception}\n"
+ )
diff --git a/pandasai/agent/response.py b/pandasai/agent/response.py
new file mode 100644
index 000000000..1aff4423f
--- /dev/null
+++ b/pandasai/agent/response.py
@@ -0,0 +1,38 @@
+from typing import List
+
+
+class ClarificationResponse:
+ """
+ Clarification Response
+
+ """
+
+ def __init__(
+ self, success: bool = True, questions: List[str] = None, message: str = ""
+ ):
+ """
+ Args:
+ success: Whether the response generated or not.
+ questions: List of questions
+ """
+ self._success: bool = success
+        self._questions: List[str] = questions if questions is not None else []
+ self._message: str = message
+
+ @property
+ def questions(self) -> List[str]:
+ return self._questions
+
+ @property
+    def message(self) -> str:
+ return self._message
+
+ @property
+ def success(self) -> bool:
+ return self._success
+
+ def __bool__(self) -> bool:
+ """
+ Define the success of response.
+ """
+ return self._success
diff --git a/pandasai/helpers/memory.py b/pandasai/helpers/memory.py
index ad7478fd9..568c2bbf1 100644
--- a/pandasai/helpers/memory.py
+++ b/pandasai/helpers/memory.py
@@ -8,12 +8,14 @@ class Memory:
_messages: list
_max_messages: int
- def __init__(self, max_messages=sys.maxsize):
+ def __init__(self, max_messages: int = sys.maxsize):
self._messages = []
self._max_messages = max_messages
def add(self, message: str, is_user: bool):
self._messages.append({"message": message, "is_user": is_user})
+
+        # Drop the oldest conversation turn (two entries: user query + assistant answer)
if len(self._messages) > self._max_messages:
del self._messages[:2]
diff --git a/pandasai/prompts/explain_prompt.py b/pandasai/prompts/explain_prompt.py
index fc969629f..9f4612470 100644
--- a/pandasai/prompts/explain_prompt.py
+++ b/pandasai/prompts/explain_prompt.py
@@ -1,47 +1,23 @@
-""" Prompt to get clarification questions
-You are provided with the following pandas DataFrames:
-
-
-{dataframe}
-
-
-
-{conversation}
-
-
-Based on the conversation, are there any clarification questions that a senior data scientist would ask? These are questions for non technical people, only ask for questions they could ask given low tech expertise and no knowledge about how the dataframes are structured.
-
-Return the JSON array of the clarification questions. If there is no clarification question, return an empty array.
-
-Json:
-""" # noqa: E501
+""" Prompt to explain solution generated
+Based on the last conversation you generated the code.
+Can you briefly explain, for a non-technical person, how you came up with the code,
+without going into the details of the pandas library?
+"""
from .base import Prompt
-class ClarificationQuestionPrompt(Prompt):
+class ExplainPrompt(Prompt):
"""Prompt to get clarification questions"""
text: str = """
-You are provided with the following pandas DataFrames:
-
-
-{dataframes}
-
-
-
-{conversation}
-
-
-Based on the conversation, are there any clarification questions
-that a senior data scientist would ask? These are questions for non technical people,
-only ask for questions they could ask given low tech expertise and
-no knowledge about how the dataframes are structured.
-
-Return the JSON array of the clarification questions.
+Based on the last conversation you generated the code.
-If there is no clarification question, return an empty array.
+
+{code}
+