rebuild data type in rebuttal-related functions
chengzr01 committed May 23, 2024
1 parent 6d2e120 commit 08a7e64

Showing 6 changed files with 124 additions and 66 deletions.
72 changes: 60 additions & 12 deletions research_town/agents/agent_base.py
@@ -1,21 +1,29 @@
import uuid
from datetime import datetime
from typing import Any, Dict, List, Tuple

from ..kbs.envlog import (
AgentAgentDiscussionLog,
AgentPaperMetaReviewLog,
AgentPaperRebuttalLog,
AgentPaperReviewLog,
)
from ..kbs.profile import AgentProfile, PaperProfile
from ..utils.agent_collector import bfs
from ..utils.agent_prompter import (
communicate_with_multiple_researchers_prompting,
find_collaborators_prompting,
generate_ideas_prompting,
generate_profile,
make_review_decision_prompting,
rebut_review_prompting,
review_paper_prompting,
review_score_prompting,
generate_profile,
summarize_research_field_prompting,
write_paper_abstract_prompting,
)
from ..utils.agent_collector import bfs
from ..utils.paper_collector import get_paper_list
from ..kbs.profile import AgentProfile, PaperProfile
from ..kbs.envlog import AgentPaperReviewLog, AgentPaperMetaReviewLog, AgentPaperRebuttalLog, AgentAgentDiscussionLog


class BaseResearchAgent(object):
def __init__(self, name: str) -> None:
@@ -77,7 +85,7 @@ def get_co_author_relationships(
agent_profile: AgentProfile,
max_node: int
) -> Tuple[List[Tuple[str, str]], Dict[str, List[Dict[str, Any]]], Dict[str, List[Dict[str, Any]]]]:
start_author = [name]
start_author = [self.name]
graph, node_feat, edge_feat = bfs(
author_list=start_author, node_limit=max_node)
return graph, node_feat, edge_feat
@@ -112,27 +120,67 @@ def review_paper(
self,
paper: PaperProfile
) -> AgentPaperReviewLog:
paper_review = review_paper_prompting(paper)[0]
paper_dict = {paper.title: paper.abstract}
paper_review = review_paper_prompting(paper_dict)[0]
review_score = review_score_prompting(paper_review)
return review_score, paper_review

review_log = AgentPaperReviewLog()
review_log.timestep = (int)(datetime.now().timestamp())
review_log.review_id = str(uuid.uuid4())
review_log.paper_id = paper.paper_id
review_log.agent_id = self.profile.agent_id
review_log.review_content = paper_review
review_log.review_score = review_score

return review_log

def make_review_decision(
self,
paper: PaperProfile,
review: List[AgentPaperReviewLog]
) -> AgentPaperMetaReviewLog:
meta_review = make_review_decision_prompting(paper, review)
paper_dict = {paper.title: paper.abstract}
review_dict = {}
for agent_review_log in review:
review_dict[agent_review_log.review_id] = (agent_review_log.review_score, agent_review_log.review_content)

meta_review = make_review_decision_prompting(paper_dict, review_dict)

if "accept" in meta_review[0].lower():
review_decision = True
else:
review_decision = False
return review_decision, meta_review[0]


meta_review_log = AgentPaperMetaReviewLog()
meta_review_log.timestep = (int)(datetime.now().timestamp())
meta_review_log.decision_id = str(uuid.uuid4())
meta_review_log.paper_id = paper.paper_id
meta_review_log.decision = "accept" if review_decision else "reject"
meta_review_log.meta_review = meta_review[0]
meta_review_log.agent_id = self.profile.agent_id
return meta_review_log

def rebut_review(
self,
paper: PaperProfile,
review: List[AgentPaperReviewLog],
decision: List[AgentPaperMetaReviewLog]
) -> AgentPaperRebuttalLog:
rebut_review = rebut_review_prompting(paper, review, decision)
return rebut_review[0]
paper_dict = {paper.title: paper.abstract}
review_dict = {}
for agent_review_log in review:
review_dict[agent_review_log.review_id] = (agent_review_log.review_score, agent_review_log.review_content)

decision_dict = {}
for agent_meta_review_log in decision:
decision_dict[agent_meta_review_log.decision_id] = (agent_meta_review_log.decision == "accept", agent_meta_review_log.meta_review)

rebut_review = rebut_review_prompting(paper_dict, review_dict, decision_dict)

rebuttal_log = AgentPaperRebuttalLog()
rebuttal_log.timestep = (int)(datetime.now().timestamp())
rebuttal_log.rebuttal_id = str(uuid.uuid4())
rebuttal_log.paper_id = paper.paper_id
rebuttal_log.agent_id = self.profile.agent_id
rebuttal_log.rebuttal_content = rebut_review[0]
return rebuttal_log
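
With this commit, review, meta-review, and rebuttal results travel as typed log objects instead of bare tuples and strings. A minimal usage sketch of the rebuilt agent API (illustrative only: it assumes a reachable LLM backend, and the placeholder id and truncated abstract are made up):

import uuid

from research_town.agents.agent_base import BaseResearchAgent
from research_town.kbs.profile import PaperProfile

agent = BaseResearchAgent("Jiaxuan You")

# Structured paper input replaces the old {title: abstract} dict argument.
paper = PaperProfile()
paper.paper_id = str(uuid.uuid4())
paper.title = "MambaOut: Do We Really Need Mamba for Vision?"
paper.abstract = "Mamba, an architecture with RNN-like token mixer of state space model (SSM), ..."

# Each step now returns a typed log carrying ids, a timestep, and content.
review_log = agent.review_paper(paper=paper)                             # AgentPaperReviewLog
meta_log = agent.make_review_decision(paper=paper, review=[review_log])  # AgentPaperMetaReviewLog
rebuttal_log = agent.rebut_review(paper=paper, review=[review_log], decision=[meta_log])
print(rebuttal_log.rebuttal_content)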
10 changes: 8 additions & 2 deletions research_town/envs/env_base.py
@@ -2,14 +2,20 @@

from ..agents.agent_base import BaseResearchAgent
from ..kbs.kb_base import BaseKnowledgeBase
from ..kbs.profile import AgentProfile


class BaseMultiAgentEnv(object):
def __init__(self, agent_dict: Dict[str, str]) -> None:
self.agents: Dict[str, BaseResearchAgent] = {}
self.profiles: Dict[str, AgentProfile] = {}
self.kb = BaseKnowledgeBase()
for agent_name, agent in agent_dict.items():
self.agents[agent_name] = BaseResearchAgent(agent)
for agent_id, agent_name in agent_dict.items():
self.agents[agent_id] = BaseResearchAgent(agent_name)
self.profiles[agent_id] = AgentProfile()
self.profiles[agent_id].agent_id = agent_id
self.profiles[agent_id].name = agent_name
self.profiles[agent_id].profile = self.agents[agent_id].get_profile(agent_name)["profile"]

def step(self) -> None:
raise NotImplementedError
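
BaseMultiAgentEnv now expects agent_dict keyed by agent_id rather than by display name, and it materializes an AgentProfile per agent at construction time. A minimal sketch (uuid-based ids, as in the updated tests; note that building each agent triggers an LLM-backed get_profile call):

import uuid

from research_town.envs.env_base import BaseMultiAgentEnv

agent_id = str(uuid.uuid4())
env = BaseMultiAgentEnv(agent_dict={agent_id: "Jiaxuan You"})

# env.agents[agent_id] is the BaseResearchAgent; env.profiles[agent_id] is the
# matching AgentProfile with agent_id, name, and generated profile text filled in.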
61 changes: 27 additions & 34 deletions research_town/envs/env_paper_rebuttal.py
@@ -1,5 +1,11 @@
from typing import Dict, Tuple

from ..kbs.envlog import (
AgentPaperMetaReviewLog,
AgentPaperRebuttalLog,
AgentPaperReviewLog,
)
from ..kbs.profile import PaperProfile
from .env_base import BaseMultiAgentEnv


@@ -10,21 +16,16 @@ def __init__(self, agent_dict: Dict[str, str]) -> None:
self.turn_max = 1
self.terminated = False
self.roles: Dict[str, str] = {}
self.submission: Dict[str, str] = {}
self.review = ""
self.decision = ""
self.rebuttal = ""

self.submission = PaperProfile()
self.review: Dict[str, AgentPaperReviewLog] = {}
self.rebuttal: Dict[str, AgentPaperRebuttalLog] = {}
self.meta_review: Dict[str, AgentPaperMetaReviewLog] = {}

def assign_roles(self, role_dict: Dict[str, str]) -> None:
self.roles = role_dict

def initialize_submission(self, external_data: Dict[str, str]) -> None:
self.submission = external_data

def submit_review(self, review_dict: Dict[str, Tuple[int, str]]) -> None:
review_serialize = [
f"Reviewer: {name}\nScore: {review[0]}\nReview: {review[1]}" for name, review in review_dict.items()]
self.review = "\n\n".join(review_serialize)
def initialize_submission(self, paper_profile: PaperProfile) -> None:
self.submission = paper_profile

def submit_decision(self, decision_dict: Dict[str, Tuple[bool, str]]) -> None:
decision_count = {"accept": 0, "reject": 0}
@@ -40,39 +41,31 @@ def submit_decision(self, decision_dict: Dict[str, Tuple[bool, str]]) -> None:
self.decision = d

def submit_rebuttal(self, rebuttal_dict: Dict[str, str]) -> None:
rebuttal_serialize = [
f"Author: {name}\nRebuttal: {rebuttal}" for name, rebuttal in rebuttal_dict.items()]
self.rebuttal = "\n\n".join(rebuttal_serialize)
pass

def step(self) -> None:
# Paper Reviewing
review_dict: Dict[str, Tuple[int, str]] = {}
for name, role in self.roles.items():
for agent_id, role in self.roles.items():
if role == "reviewer":
review_dict[name] = self.agents[name].review_paper(
self.review[agent_id] = self.agents[agent_id].review_paper(
paper=self.submission)
self.submit_review(review_dict)

# Decision Making
decision_dict: Dict[str, Tuple[bool, str]] = {}
for name, role in self.roles.items():
# Paper Meta Reviewing
review_list = [review for _, review in self.review.items()]
for agent_id, role in self.roles.items():
if role == "reviewer":
decision_dict[name] = self.agents[name].make_review_decision(
submission=self.submission, review=review_dict)
self.submit_decision(decision_dict)
self.meta_review[agent_id] = self.agents[agent_id].make_review_decision(
paper=self.submission, review=review_list)

# Rebuttal Submitting
rebuttal_dict: Dict[str, str] = {}
for name, role in self.roles.items():
meta_review_list = [meta_review for _, meta_review in self.meta_review.items()]
for agent_id, role in self.roles.items():
if role == "author":
rebuttal_dict[name] = self.agents[name].rebut_review(
submission=self.submission,
review=review_dict,
decision=decision_dict)
self.submit_rebuttal(rebuttal_dict)
self.rebuttal[agent_id] = self.agents[agent_id].rebut_review(
paper=self.submission,
review=review_list,
decision=meta_review_list)

self.turn_number += 1
if self.decision == "accept":
self.terminated = True
if self.turn_number >= self.turn_max:
self.terminated = True
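
Since step() now stores typed, per-agent log objects instead of serialized strings, results are read from the env's dicts keyed by agent_id. A minimal sketch of driving the env and inspecting the outcome (assuming roles and a submission were set up as in the updated test below):

while not env.terminated:
    env.step()

for agent_id, review_log in env.review.items():      # AgentPaperReviewLog per reviewer
    print(agent_id, review_log.review_score)
for agent_id, rebuttal_log in env.rebuttal.items():  # AgentPaperRebuttalLog per author
    print(agent_id, rebuttal_log.rebuttal_content)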
24 changes: 12 additions & 12 deletions research_town/utils/agent_prompter.py
@@ -161,14 +161,14 @@ def review_score_prompting(paper_review: str, llm_model: Optional[str] = "mistra
else:
return 0

def review_paper_prompting(external_data: Dict[str, str], llm_model: Optional[str] = "mistralai/Mixtral-8x7B-Instruct-v0.1") -> List[str]:
def review_paper_prompting(paper: Dict[str, str], llm_model: Optional[str] = "mistralai/Mixtral-8x7B-Instruct-v0.1") -> List[str]:
"""
Review a paper given a dict mapping its title to its abstract.
"""

papers_serialize = []
for _, timestamp in enumerate(external_data.keys()):
paper_entry = f"Title: {timestamp}\nPaper: {external_data[timestamp]}"
for _, title in enumerate(paper.keys()):
paper_entry = f"Title: {title}\nPaper: {paper[title]}"
papers_serialize.append(paper_entry)
papers_serialize_all = "\n\n".join(papers_serialize)

@@ -184,13 +184,13 @@ def make_review_decision_prompting(external_data: Dict[str, str], llm_model: Optional[s
return openai_prompting(llm_model, prompt)


def make_review_decision_prompting(submission: Dict[str, str], review: Dict[str, Tuple[int,str]], llm_model: Optional[str] = "mistralai/Mixtral-8x7B-Instruct-v0.1") -> List[str]:
submission_serialize = []
for _, title in enumerate(submission.keys()):
abstract = submission[title]
submission_entry = f"Title: {title}\nAbstract:{abstract}\n"
submission_serialize.append(submission_entry)
submission_serialize_all = "\n\n".join(submission_serialize)
def make_review_decision_prompting(paper: Dict[str, str], review: Dict[str, Tuple[int,str]], llm_model: Optional[str] = "mistralai/Mixtral-8x7B-Instruct-v0.1") -> List[str]:
paper_serialize = []
for _, title in enumerate(paper.keys()):
abstract = paper[title]
paper_entry = f"Title: {title}\nAbstract:{abstract}\n"
paper_serialize.append(paper_entry)
paper_serialize_all = "\n\n".join(paper_serialize)

review_serialize = []
for _, name in enumerate(review.keys()):
@@ -201,10 +201,10 @@ def make_review_decision_prompting(paper: Dict[str, str], review: Dict[str,

prompt_template = (
"Please make an review decision to decide whether the following submission should be accepted or rejected by an academic conference. Here are several reviews from reviewers for this submission. Please indicate your review decision as accept or reject."
"Here is the submission: {submission_serialize_all}"
"Here is the submission: {paper_serialize_all}"
"Here are the reviews: {review_serialize_all}"
)
template_input = {"submission_serialize_all": submission_serialize_all,
template_input = {"paper_serialize_all": paper_serialize_all,
"review_serialize_all": review_serialize_all}
prompt = prompt_template.format_map(template_input)
return openai_prompting(llm_model, prompt)
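
The prompting helpers keep their dict-based interface; the parameters are simply renamed from submission/external_data to paper. A direct call might look like this (a sketch only: the review dict maps a review id to a (score, text) tuple per the signature above, and the id and review text here are made up):

from research_town.utils.agent_prompter import make_review_decision_prompting

paper = {"MambaOut: Do We Really Need Mamba for Vision?": "abstract text ..."}
review = {"review-1": (8, "Well motivated; the experiments support the claims.")}
decision = make_review_decision_prompting(paper, review)  # List[str] returned by the LLM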
2 changes: 1 addition & 1 deletion tests/test_agent_base.py
@@ -24,7 +24,7 @@ def test_make_review_decision(mock_openai_prompting: MagicMock) -> None:

research_agent = BaseResearchAgent("Jiaxuan You")
submission = {"MambaOut: Do We Really Need Mamba for Vision?": "Mamba, an architecture with RNN-like token mixer of state space model (SSM), was recently introduced to address the quadratic complexity of the attention mechanism and subsequently applied to vision tasks. Nevertheless, the performance of Mamba for vision is often underwhelming when compared with convolutional and attention-based models. In this paper, we delve into the essence of Mamba, and conceptually conclude that Mamba is ideally suited for tasks with long-sequence and autoregressive characteristics. For vision tasks, as image classification does not align with either characteristic, we hypothesize that Mamba is not necessary for this task; Detection and segmentation tasks are also not autoregressive, yet they adhere to the long-sequence characteristic, so we believe it is still worthwhile to explore Mamba's potential for these tasks. To empirically verify our hypotheses, we construct a series of models named \\emph{MambaOut} through stacking Mamba blocks while removing their core token mixer, SSM. Experimental results strongly support our hypotheses. Specifically, our MambaOut model surpasses all visual Mamba models on ImageNet image classification, indicating that Mamba is indeed unnecessary for this task. As for detection and segmentation, MambaOut cannot match the performance of state-of-the-art visual Mamba models, demonstrating the potential of Mamba for long-sequence visual tasks."}
review = research_agent.review_paper(paper=submission)
review = research_agent.review_paper(submission)
review_decision, meta_review = research_agent.make_review_decision(
submission=submission, review={"Jiaxuan You": review})
assert review_decision is True
21 changes: 16 additions & 5 deletions tests/test_envs.py
@@ -1,21 +1,32 @@
import uuid
from unittest.mock import MagicMock, patch

from research_town.envs.env_paper_rebuttal import (
PaperRebuttalMultiAgentEnv,
)
from research_town.kbs.profile import AgentProfile, PaperProfile


@patch("research_town.utils.agent_prompter.openai_prompting")
def test_paper_rebuttal_env(mock_openai_prompting: MagicMock) -> None:
mock_openai_prompting.return_value = [
"Paper Rebuttal Environment."]
env = PaperRebuttalMultiAgentEnv(agent_dict={"Jiaxuan You": "Jiaxuan You", "Rex Ying":
"Rex Ying", "Jure Leskovec": "Jure Leskovec", "Christos Faloutsos": "Christos Faloutsos"})
env.assign_roles({"Jiaxuan You": "author", "Rex Ying": "author",
"Jure Leskovec": "reviewer", "Christos Faloutsos": "reviewer"})
env.initialize_submission({"MambaOut: Do We Really Need Mamba for Vision?": "Mamba, an architecture with RNN-like token mixer of state space model (SSM), was recently introduced to address the quadratic complexity of the attention mechanism and subsequently applied to vision tasks. Nevertheless, the performance of Mamba for vision is often underwhelming when compared with convolutional and attention-based models. In this paper, we delve into the essence of Mamba, and conceptually conclude that Mamba is ideally suited for tasks with long-sequence and autoregressive characteristics. For vision tasks, as image classification does not align with either characteristic, we hypothesize that Mamba is not necessary for this task; Detection and segmentation tasks are also not autoregressive, yet they adhere to the long-sequence characteristic, so we believe it is still worthwhile to explore Mamba's potential for these tasks. To empirically verify our hypotheses, we construct a series of models named \\emph{MambaOut} through stacking Mamba blocks while removing their core token mixer, SSM. Experimental results strongly support our hypotheses. Specifically, our MambaOut model surpasses all visual Mamba models on ImageNet image classification, indicating that Mamba is indeed unnecessary for this task. As for detection and segmentation, MambaOut cannot match the performance of state-of-the-art visual Mamba models, demonstrating the potential of Mamba for long-sequence visual tasks."})
author_id = str(uuid.uuid4())
reviewer_id = str(uuid.uuid4())
agent_dict = {author_id: "Jiaxuan You", reviewer_id: "Jure Leskovec"}
env = PaperRebuttalMultiAgentEnv(agent_dict=agent_dict)

submission = PaperProfile()
submission.paper_id = str(uuid.uuid4())
submission.title = "MambaOut: Do We Really Need Mamba for Vision?"
submission.abstract = "Mamba, an architecture with RNN-like token mixer of state space model (SSM), was recently introduced to address the quadratic complexity of the attention mechanism and subsequently applied to vision tasks. Nevertheless, the performance of Mamba for vision is often underwhelming when compared with convolutional and attention-based models. In this paper, we delve into the essence of Mamba, and conceptually conclude that Mamba is ideally suited for tasks with long-sequence and autoregressive characteristics. For vision tasks, as image classification does not align with either characteristic, we hypothesize that Mamba is not necessary for this task; Detection and segmentation tasks are also not autoregressive, yet they adhere to the long-sequence characteristic, so we believe it is still worthwhile to explore Mamba's potential for these tasks. To empirically verify our hypotheses, we construct a series of models named \\emph{MambaOut} through stacking Mamba blocks while removing their core token mixer, SSM. Experimental results strongly support our hypotheses. Specifically, our MambaOut model surpasses all visual Mamba models on ImageNet image classification, indicating that Mamba is indeed unnecessary for this task. As for detection and segmentation, MambaOut cannot match the performance of state-of-the-art visual Mamba models, demonstrating the potential of Mamba for long-sequence visual tasks."

env.initialize_submission(submission)
env.assign_roles({author_id: "author", reviewer_id: "reviewer"})

while not env.terminated:
env.step()

assert isinstance(env.review, str)
assert isinstance(env.decision, str)
assert isinstance(env.rebuttal, str)
