Allow GC artifact to be set prior to calling initial conversation step. (microsoft#237)

### GC Configs
- Update the doc agent's GC naming for clarity
- Add `filenames` to the GC attachment check config (a sketch of the updated artifact follows this list)
- Add `filenames` and `current_outline` to the GC outline feedback config
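
A minimal sketch of the updated attachment-check artifact model, with the fields taken from the diff below (the outline feedback artifact keeps its `user_decision` field and additionally gains `current_outline`):

```python
from pydantic import BaseModel, Field


class ArtifactModel(BaseModel):
    final_response: str = Field(description="The final response from the agent to the user.")
    conversation_status: str = Field(description="The status of the conversation.")
    # New field: lets the doc agent inject the attachment filenames before the first GC step.
    filenames: str = Field(
        description="Names of the available files currently uploaded as attachments. "
        "May be an empty string if no files are attached."
    )
```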

### Doc agent's GC helper file updated
- Separate the GC constructor from the conversation step; the constructor now sets the initial state in storage
- Add a getter for the artifact, which only reads from storage
- Add a setter for the artifact, which writes to storage and resets the GC instance with the new info
- `step_conversation` shrinks: it now only needs `last_user_message` as a parameter

### Doc agent
- Update logic per the changes in the GC helper file to initialize the GC, update the artifact, and run the conversation step (a usage sketch follows below)
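
A minimal sketch of the resulting call pattern, adapted from the `_gc_attachment_check` changes in the diff below; `config`, `context`, `message`, the `openai_client` helper module, and the attachments extension are assumed to be available from the surrounding doc agent code:

```python
from .document.gc_attachment_check_config import GCAttachmentCheckConfigModel
from .document.guided_conversation import GuidedConversation


async def gc_attachment_check_step(config, context, attachments_extension, message):
    # 1. Construct the GC helper; the constructor now writes the initial GC state to storage.
    guided_conversation = GuidedConversation(
        config=config,
        openai_client=openai_client.create_client(config.service_config),
        agent_config=GCAttachmentCheckConfigModel(),
        conversation_context=context,
    )

    # 2. Update the artifact before the first conversation step.
    filenames = await attachments_extension.get_attachment_filenames(
        context, config=config.agents_config.attachment_agent
    )
    artifact_dict = guided_conversation.get_artifact_dict()  # reads from storage only
    if artifact_dict is not None:
        artifact_dict["filenames"] = "Filenames already attached: " + ", ".join(filenames)
        guided_conversation.set_artifact_dict(artifact_dict)  # writes to storage and resets the GC instance

    # 3. Step the conversation; only the last user message is passed now.
    return await guided_conversation.step_conversation(last_user_message=message.content)
```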
momuno authored Nov 12, 2024
1 parent 65c1086 commit f3819aa
Showing 5 changed files with 147 additions and 94 deletions.

assistants/prospector-assistant/assistant/agents/document/config.py
@@ -63,7 +63,7 @@ def parse_properties(properties: Dict[str, Any]) -> Dict[str, Any]:
#


class GuidedConversationAgentConfigModel(BaseModel):
class GuidedConversationConfigModel(BaseModel):
enabled: Annotated[
bool,
Field(description=helpers.load_text_include("guided_conversation_agent_enabled.md")),

assistants/prospector-assistant/assistant/agents/document/gc_attachment_check_config.py
@@ -7,7 +7,7 @@

from ... import helpers
from . import config_defaults as config_defaults
from .config import GuidedConversationAgentConfigModel
from .config import GuidedConversationConfigModel

if TYPE_CHECKING:
pass
@@ -19,6 +19,9 @@
class ArtifactModel(BaseModel):
final_response: str = Field(description="The final response from the agent to the user.")
conversation_status: str = Field(description="The status of the conversation.")
filenames: str = Field(
description="Names of the available files currently uploaded as attachments. May be an empty string if no files are attached."
)


# Rules - These are the do's and don'ts that the agent should follow during the conversation.
@@ -107,7 +110,7 @@ def parse_properties(properties: Dict[str, Any]) -> Dict[str, Any]:
#


class GCAttachmentCheckConfigModel(GuidedConversationAgentConfigModel):
class GCAttachmentCheckConfigModel(GuidedConversationConfigModel):
enabled: Annotated[
bool,
Field(description=helpers.load_text_include("guided_conversation_agent_enabled.md")),

assistants/prospector-assistant/assistant/agents/document/gc_draft_outline_feedback_config.py
@@ -7,7 +7,7 @@

from ... import helpers
from . import config_defaults as config_defaults
from .config import GuidedConversationAgentConfigModel
from .config import GuidedConversationConfigModel

if TYPE_CHECKING:
pass
@@ -20,6 +20,10 @@ class ArtifactModel(BaseModel):
final_response: str = Field(description="The final response from the agent to the user.")
conversation_status: str = Field(description="The status of the conversation.")
user_decision: str = Field(description="The decision of the user on what should happen next.")
filenames: str = Field(
description="Names of the available files currently uploaded as attachments. Information from the content of these files was used to help draft the outline under review."
)
current_outline: str = Field(description="The most up-to-date version of the outline under review.")


# Rules - These are the do's and don'ts that the agent should follow during the conversation.
@@ -110,7 +114,7 @@ def parse_properties(properties: Dict[str, Any]) -> Dict[str, Any]:
#


class GCDraftOutlineFeedbackConfigModel(GuidedConversationAgentConfigModel):
class GCDraftOutlineFeedbackConfigModel(GuidedConversationConfigModel):
enabled: Annotated[
bool,
Field(description=helpers.load_text_include("guided_conversation_agent_enabled.md")),

assistants/prospector-assistant/assistant/agents/document/guided_conversation.py
@@ -2,7 +2,7 @@
import logging
from pathlib import Path

from guided_conversation.guided_conversation_agent import GuidedConversation
from guided_conversation.guided_conversation_agent import GuidedConversation as GuidedConversationAgent
from openai import AsyncOpenAI
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
@@ -12,7 +12,7 @@
)

from ...config import AssistantConfigModel
from .config import GuidedConversationAgentConfigModel
from .config import GuidedConversationConfigModel
from .status import Status, StepName

logger = logging.getLogger(__name__)
@@ -23,82 +23,114 @@
#


class GuidedConversationAgent:
class GuidedConversation:
"""
An agent for managing artifacts.
"""

@staticmethod
def get_state(
conversation_context: ConversationContext,
) -> dict | None:
"""
Get the state of the guided conversation agent.
"""
return _read_guided_conversation_state(conversation_context)

@staticmethod
async def step_conversation(
def __init__(
self,
config: AssistantConfigModel,
openai_client: AsyncOpenAI,
agent_config: GuidedConversationAgentConfigModel,
agent_config: GuidedConversationConfigModel,
conversation_context: ConversationContext,
last_user_message: str | None,
) -> tuple[str, Status, StepName | None]:
"""
Step the conversation to the next turn.
"""
next_step_name = None

rules = agent_config.rules
conversation_flow = agent_config.conversation_flow
context = agent_config.context
resource_constraint = agent_config.resource_constraint
artifact = agent_config.get_artifact_model()
) -> None:
self.guided_conversation_agent: GuidedConversationAgent
self.conversation_context: ConversationContext = conversation_context

# plug in attachments

kernel = Kernel()
service_id = "gc_main"
self.kernel = Kernel()
self.service_id = "gc_main"

chat_service = OpenAIChatCompletion(
service_id=service_id,
service_id=self.service_id,
async_client=openai_client,
ai_model_id=config.request_config.openai_model,
)
kernel.add_service(chat_service)
self.kernel.add_service(chat_service)

guided_conversation_agent: GuidedConversation
self.artifact_model = agent_config.get_artifact_model()
self.conversation_flow = agent_config.conversation_flow
self.context = agent_config.context
self.rules = agent_config.rules
self.resource_constraint = agent_config.resource_constraint

state = _read_guided_conversation_state(conversation_context)
if state:
guided_conversation_agent = GuidedConversation.from_json(
self.guided_conversation_agent = GuidedConversationAgent.from_json(
json_data=state,
kernel=kernel,
artifact=artifact, # type: ignore
conversation_flow=conversation_flow,
context=context,
rules=rules,
resource_constraint=resource_constraint,
service_id=service_id,
kernel=self.kernel,
artifact=self.artifact_model, # type: ignore
conversation_flow=self.conversation_flow,
context=self.context,
rules=self.rules,
resource_constraint=self.resource_constraint,
service_id=self.service_id,
)
else:
guided_conversation_agent = GuidedConversation(
kernel=kernel,
artifact=artifact, # type: ignore
conversation_flow=conversation_flow,
context=context,
rules=rules,
resource_constraint=resource_constraint,
service_id=service_id,
self.guided_conversation_agent = GuidedConversationAgent(
kernel=self.kernel,
artifact=self.artifact_model, # type: ignore
conversation_flow=self.conversation_flow,
context=self.context,
rules=self.rules,
resource_constraint=self.resource_constraint,
service_id=self.service_id,
)
_write_guided_conversation_state(conversation_context, self.guided_conversation_agent.to_json())

@staticmethod
def get_state(
conversation_context: ConversationContext,
) -> dict | None:
"""
Get the state of the guided conversation agent.
"""
return _read_guided_conversation_state(conversation_context)

def get_artifact_dict(self) -> dict | None:
artifact_dict = None
state_dict = self.get_state(self.conversation_context)
if state_dict is not None:
artifact_item = state_dict.get("artifact")
if artifact_item is not None:
artifact_dict = artifact_item.get("artifact")
return artifact_dict

def set_artifact_dict(self, artifact_dict: dict) -> None:
state_dict = self.get_state(self.conversation_context)
if state_dict is not None:
artifact_item = state_dict.get("artifact")
if artifact_item is not None:
artifact_item["artifact"] = artifact_dict
# Update storage with new state info
_write_guided_conversation_state(self.conversation_context, state_dict)
# Update GC with new state info
self.guided_conversation_agent = GuidedConversationAgent.from_json(
json_data=state_dict,
kernel=self.kernel,
artifact=self.artifact_model, # type: ignore
conversation_flow=self.conversation_flow,
context=self.context,
rules=self.rules,
resource_constraint=self.resource_constraint,
service_id=self.service_id,
)

async def step_conversation(
self,
last_user_message: str | None,
) -> tuple[str, Status, StepName | None]:
"""
Step the conversation to the next turn.
"""
next_step_name = None

# Step the conversation to start the conversation with the agent
# or message
result = await guided_conversation_agent.step_conversation(last_user_message)
result = await self.guided_conversation_agent.step_conversation(last_user_message)

# Save the state of the guided conversation agent
_write_guided_conversation_state(conversation_context, guided_conversation_agent.to_json())
_write_guided_conversation_state(self.conversation_context, self.guided_conversation_agent.to_json())

# convert information in artifact for Document Agent
# conversation_status: # this should relate to result.is_conversation_over
@@ -110,7 +142,7 @@ async def step_conversation(
response: str = ""

# to_json is actually to dict, not to json.
gc_dict = guided_conversation_agent.to_json()
gc_dict = self.guided_conversation_agent.to_json()
artifact_item = gc_dict.get("artifact")
if artifact_item is not None:
artifact_item = artifact_item.get("artifact")
@@ -129,7 +161,7 @@ async def step_conversation(
response = ""
status = Status.NOT_COMPLETED
elif conversation_status == "user_completed":
_delete_guided_conversation_state(conversation_context)
_delete_guided_conversation_state(self.conversation_context)
response = final_response
if user_decision is None:
status = Status.USER_COMPLETED
@@ -146,7 +178,7 @@ async def step_conversation(
else:
logger.error("unknown user decision")
else:
_delete_guided_conversation_state(conversation_context)
_delete_guided_conversation_state(self.conversation_context)
status = Status.USER_EXIT_EARLY
response = final_response

82 changes: 48 additions & 34 deletions assistants/prospector-assistant/assistant/agents/document_agent.py
@@ -22,10 +22,10 @@
from semantic_workbench_assistant.assistant_app import ConversationContext, storage_directory_for_context

from ..config import AssistantConfigModel
from .document.config import GuidedConversationAgentConfigModel
from .document.config import GuidedConversationConfigModel
from .document.gc_attachment_check_config import GCAttachmentCheckConfigModel
from .document.gc_draft_outline_feedback_config import GCDraftOutlineFeedbackConfigModel
from .document.guided_conversation import GuidedConversationAgent
from .document.guided_conversation import GuidedConversation
from .document.status import Status, StepName

logger = logging.getLogger(__name__)
@@ -759,24 +759,33 @@ async def _gc_attachment_check(
message: ConversationMessage,
metadata: dict[str, Any] = {},
) -> tuple[Status, StepName | None]:
method_metadata_key = "document_agent_gc_response"
method_metadata_key = "document_agent_gc_attachment_check"

gc_conversation_config: GuidedConversationAgentConfigModel = GCAttachmentCheckConfigModel()
# get attachment filenames for context
gc_attachment_conversation_config: GuidedConversationConfigModel = GCAttachmentCheckConfigModel()

guided_conversation = GuidedConversation(
config=config,
openai_client=openai_client.create_client(config.service_config),
agent_config=gc_attachment_conversation_config,
conversation_context=context,
)

# update artifact
filenames = await self._attachments_extension.get_attachment_filenames(
context, config=config.agents_config.attachment_agent
)

filenames_str = ", ".join(filenames)
filenames_str = "Filenames already attached: " + filenames_str
gc_conversation_config.context = gc_conversation_config.context + "\n\n" + filenames_str

artifact_dict = guided_conversation.get_artifact_dict()
if artifact_dict is not None:
artifact_dict["filenames"] = filenames_str
guided_conversation.set_artifact_dict(artifact_dict)
else:
logger.error("artifact_dict unavailable.")

# run guided conversation step
try:
response_message, conversation_status, next_step_name = await GuidedConversationAgent.step_conversation(
config=config,
openai_client=openai_client.create_client(config.service_config),
agent_config=gc_conversation_config,
conversation_context=context,
response_message, conversation_status, next_step_name = await guided_conversation.step_conversation(
last_user_message=message.content,
)

@@ -898,38 +907,43 @@ async def _gc_outline_feedback(
message: ConversationMessage | None,
metadata: dict[str, Any] = {},
) -> tuple[Status, StepName | None]:
method_metadata_key = "document_agent_gc_response"
method_metadata_key = "document_agent_gc_outline_feedback"

gc_do_feedback_config: GuidedConversationAgentConfigModel = GCDraftOutlineFeedbackConfigModel()
# get attachment filenames for context
if message is not None:
user_message = message.content
else:
user_message = None

gc_outline_feedback_config: GuidedConversationConfigModel = GCDraftOutlineFeedbackConfigModel()

guided_conversation = GuidedConversation(
config=config,
openai_client=openai_client.create_client(config.service_config),
agent_config=gc_outline_feedback_config,
conversation_context=context,
)

# update artifact
filenames = await self._attachments_extension.get_attachment_filenames(
context, config=config.agents_config.attachment_agent
)

filenames_str = ", ".join(filenames)
filenames_str = "Filenames already attached: " + filenames_str
gc_do_feedback_config.context = gc_do_feedback_config.context + "\n\n" + filenames_str

# get current outline related info
current_outline: str | None = None
outline_str: str = ""
if path.exists(storage_directory_for_context(context) / "document_agent/outline.txt"):
current_outline = (storage_directory_for_context(context) / "document_agent/outline.txt").read_text()
outline_str = (storage_directory_for_context(context) / "document_agent/outline.txt").read_text()

if current_outline is not None:
outline_str = "Current outline under review: " + current_outline
gc_do_feedback_config.context = gc_do_feedback_config.context + "\n\n" + outline_str

if message is not None:
user_message = message.content
artifact_dict = guided_conversation.get_artifact_dict()
if artifact_dict is not None:
artifact_dict["filenames"] = filenames_str
artifact_dict["current_outline"] = outline_str
guided_conversation.set_artifact_dict(artifact_dict)
else:
user_message = None
logger.error("artifact_dict unavailable.")

# run guided conversation step
try:
response_message, conversation_status, next_step_name = await GuidedConversationAgent.step_conversation(
config=config,
openai_client=openai_client.create_client(config.service_config),
agent_config=gc_do_feedback_config,
conversation_context=context,
response_message, conversation_status, next_step_name = await guided_conversation.step_conversation(
last_user_message=user_message,
)

