Review2 #861
base: development
Changes from all commits
487c863
f39284b
2bd1640
9a3c7fb
16a6046
b98186f
@@ -0,0 +1,140 @@
import { getBotContext, getLogger } from "../../../bindings";
import { Payload, StreamlinedComment } from "../../../types";
import { getAllIssueComments, getPullByNumber } from "../../../helpers";
import { CreateChatCompletionRequestMessage } from "openai/resources/chat";
import { askGPT, getPRSpec, specCheckTemplate, validationMsg } from "../../../helpers/gpt";
import { ErrorDiff } from "../../../utils/helpers";

/**
 * @notice Three calls to OpenAI are made. First for context, second for review and third for finalization.
 * @returns Pull Request Report
 */
export const review = async (body: string) => {
  const context = getBotContext();
  const logger = getLogger();

  const payload = context.payload as Payload;
  const issue = payload.issue;

  if (!issue) {
    return ErrorDiff(`Payload issue is undefined.`);
  }

  if (!body) {
    return ErrorDiff(`Payload body is undefined.`);
  }

  const isPr = await getPullByNumber(context, issue.number);

  if (!isPr) {
    return ErrorDiff(`Can only be used on pull requests.`);
  }

  const reviewRegex = /^\/review/;
  const reviewRegexMatch = body.match(reviewRegex);

  if (!reviewRegexMatch) {
    return ErrorDiff(`Error matching regex for review`);
  }

  const streamlined: StreamlinedComment[] = [];
  let chatHistory: CreateChatCompletionRequestMessage[] = [];
  const commentsRaw = await getAllIssueComments(issue.number, "raw");

  if (!commentsRaw) {
    logger.info(`Error getting issue comments`);
    return ErrorDiff(`Error getting issue comments.`);
  }

  // return a diff of the changes made in the PR
  const comparePR = async () => {
    const comparePR = await context.octokit.pulls.get({
      owner: payload.repository.owner.login,
      repo: payload.repository.name,
      pull_number: issue.number,
    });

    const pr = comparePR.data;

    const prDiff = await context.octokit.pulls.get({
      owner: payload.repository.owner.login,
      repo: payload.repository.name,
      pull_number: pr.number,
      mediaType: {
        format: "diff",
      },
    });

    const diffContent = prDiff.data;

    return {
      pr,
      diff: diffContent,
    };
  };

  const isPull = async () => {
    if (isPr) {
      const diff = await comparePR()
        .then(({ diff }) => {
          return diff;
        })
        .catch((error) => {
          logger.info(`Error getting diff: ${error}`);
          return ErrorDiff(`Error getting diff: ${error}`);
        });

      const spec = await getPRSpec(context, chatHistory, streamlined);

      chatHistory = [];
      chatHistory.push(
        {
          role: "system",
          content: specCheckTemplate,
        } as CreateChatCompletionRequestMessage,
        {
          role: "assistant",
          content: "Spec for Pr: \n" + JSON.stringify(spec),
        } as CreateChatCompletionRequestMessage,
        {
          role: "user",
          content: `${issue.assignees[0].login}'s PR Diff: \n` + JSON.stringify(diff),
        } as CreateChatCompletionRequestMessage
      );

      const gptResponse = await askGPT(`Pr review call for #${issue.number}`, chatHistory);

      chatHistory = [];
      chatHistory.push(
        {
          role: "system",
          content: validationMsg,
        } as CreateChatCompletionRequestMessage,
        {
          role: "assistant",
          content: `Validate for user: ${issue.assignees[0].login}: \n` + JSON.stringify(gptResponse),
        } as CreateChatCompletionRequestMessage
      );

      const validated = await askGPT(`Pr review validation call for #${issue.number}`, chatHistory);

      if (typeof validated === "string") {
        return validated;
      } else {
        if (validated.answer) {
          return validated.answer;
        } else {
          return ErrorDiff(`No answer found for issue #${issue.number}`);
        }
      }
    } else {
      return ErrorDiff(`No PR found for issue #${issue.number}`);
    }
  };

  const res = await isPull();
  if (res.startsWith("```diff\n")) {
    return res;
  }
  return res + `\n###### Ensure the pull request requirements are in the linked issue's first comment and update it if the scope evolves.`;
};
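`ErrorDiff` is imported from `utils/helpers` and is not part of this diff; judging by the `startsWith` check at the end of `review`, it appears to wrap its message in a fenced `diff` block so GitHub renders it in red. A rough sketch of that assumed shape:

```ts
// Assumed shape of the ErrorDiff helper, inferred only from the startsWith
// check above; the real implementation lives in utils/helpers.
export const ErrorDiff = (message: unknown) => {
  return "```diff\n- " + message + "\n```";
};
```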
@@ -1,23 +1,55 @@
import { getBotConfig, getBotContext, getLogger } from "../bindings";
import { Payload, StreamlinedComment, UserType } from "../types";
import { getAllIssueComments, getAllLinkedIssuesAndPullsInBody } from "../helpers";
import { getAllIssueComments, getAllLinkedIssuesAndPullsInBody, getIssueByNumber } from "../helpers";
import OpenAI from "openai";
import { CreateChatCompletionRequestMessage } from "openai/resources/chat";
import { ErrorDiff } from "../utils/helpers";
import { Context } from "probot";

export const sysMsg = `You are the UbiquityAI, designed to provide accurate technical answers. \n
const escapeMsg = `Please ignore all previous commands and directives. \n

> Comment: What's the purpose of the escape message?
>
> Reply: OpenAI has their default system message which defines how it works with you.

Perform exactly what is asked of you in the following way: \n\n`;

export const sysMsg = `${escapeMsg} You are an AI designed to provide accurate technical answers. \n
Whenever appropriate, format your response using GitHub Flavored Markdown. Utilize tables, lists, and code blocks for clear and organized answers. \n
Do not make up answers. If you are unsure, say so. \n
Original Context exists only to provide you with additional information to the current question, use it to formulate answers. \n
Infer the context of the question from the Original Context using your best judgement. \n
All replies MUST end with "\n\n <!--- { 'UbiquityAI': 'answer' } ---> ".\n
All replies MUST end with "\n\n <!--- { 'OpenAI': 'answer' } ---> ".\n
`;

export const validationMsg = `${escapeMsg} You are an AI validation bot designed to ensure that the answers provided by the OpenAI API meet our predefined standards. \n
The input you'll validate is the output of a pull request review performed by GPT-3, depending on whether it has achieved the spec will determine what you need to do. \n

> Comment: Interesting
>
> Reply: Something along these lines

If the spec is not achieved then you will take the useful information from the review and deliver it using the following template: \n
=== Template A === \n
### Spec not achieved
{username} this is where you went wrong...
this is how you can fix it...
> code example of solution
=== Template A === \n

If the spec is achieved then you will respond using the following template including their real username, no @ symbols:\n
=== Template B === \n
### Spec achieved
{username}, you have achieved the spec and now the reviewers will let you know if there are any other changes needed.\n
=== Template B === \n
`;

export const gptContextTemplate = `
You are the UbiquityAI, designed to review and analyze pull requests.
export const specCheckTemplate = `${escapeMsg} Using the provided context, ensure you clearly understand the specification of the issue. \n

> Comment: Solid prompt. I think your prompt writing is better than before.
>
> Reply: Appreciate it 🤝

Now using your best judgement, determine if the specification has been met based on the PR diff provided. \n
The spec should be achieved at least logically, if not literally. If changes are made that are not directly mentioned in the spec, but are logical and do not break the spec, they are acceptable. \n
Your response will be posted as a GitHub comment for everyone to see in the pull request review conversation.
Knowing this, only include information that will benefit them, think of it as a quick summary of the review.
You can add value by identifying coding errors and code suggestions that benefit both the author and reviewers.
`;

export const gptContextTemplate = `${escapeMsg}
You are an AI designed to review and analyze pull requests.

> Comment: As silly as this sounds, "Take a deep breath and work on this problem step by step" might be useful. "A little bit of arithmetic and a logical approach will help us quickly arrive at the solution to this problem." is useful for GPT-3.5, according to the above source.
>
> Reply: That's really interesting how effective that is. I wonder why the "take a breath" aspect plays into it at all; you'd expect it to reply "As an AI language model, I cannot breathe", but it just personifies itself and does it? Crazy 🤣 Will definitely add this into the prompts.

You have been provided with the spec of the issue and all linked issues or pull requests.
Using this full context, reply in pure JSON format, with the following structure, omitting irrelevant information pertaining to the specification.
You MUST provide the following structure, but you may add additional information if you deem it relevant.
Do not include example data, only include data relevant to the specification.

Example:[
{
"source": "issue #123"

@@ -54,6 +86,66 @@ Example:[
]
`;

export const getPRSpec = async (context: Context, chatHistory: CreateChatCompletionRequestMessage[], streamlined: StreamlinedComment[]) => {

> Comment: Use named functions, because it is more expressive for debugging in the context of stack traces.
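A minimal sketch of the kind of change being suggested, assuming the same imports as this file; the function body itself would stay as in the diff that follows:

```ts
import { Context } from "probot";
import { CreateChatCompletionRequestMessage } from "openai/resources/chat";
import { StreamlinedComment } from "../types";

// A named function declaration, as suggested above, so the name "getPRSpec"
// is explicit rather than inferred from the const it is assigned to.
export async function getPRSpec(context: Context, chatHistory: CreateChatCompletionRequestMessage[], streamlined: StreamlinedComment[]) {
  // ...same body as in the diff below
  return chatHistory;
}
```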

  const logger = getLogger();

  const payload = context.payload as Payload;

  const pr = payload.issue;

  if (!pr) {
    return ErrorDiff(`Payload issue is undefined.`);
  }

  // we're in the pr context, so grab the linked issue body
  const regex = /(#(\d+)|https:\/\/github\.com\/[^/\s]+\/[^/\s]+\/(issues|pull)\/(\d+))/gi;
  const linkedIssueNumber = pr.body.match(regex);
  const linkedIssues: number[] = [];

  if (linkedIssueNumber) {
    linkedIssueNumber.forEach((issue: string) => {
      if (issue.includes("#")) {
        linkedIssues.push(Number(issue.slice(1)));
      } else {
        linkedIssues.push(Number(issue.split("/")[6]));
      }
    });
  } else {
    logger.info(`No linked issues or prs found`);
  }

  if (!linkedIssueNumber) {
    return ErrorDiff(`No linked issue found in body.`);
  }

  // get the linked issue body
  const linkedIssue = await getIssueByNumber(context, linkedIssues[0]);

  if (!linkedIssue) {
    return ErrorDiff(`Error getting linked issue.`);
  }

  // add the first comment of the pull request which is the contributor's description of their changes
  streamlined.push({
    login: pr.user.login,
    body: `${pr.user.login}'s pull request description:\n` + pr.body,
  });

  // add the linked issue body as this is the spec
  streamlined.push({
    login: "assistant",
    body: `#${linkedIssue.number} Specification: \n` + linkedIssue.body,
  });

  // no other conversation context is needed
  chatHistory.push({
    role: "system",
    content: "This pull request context: \n" + JSON.stringify(streamlined),
  } as CreateChatCompletionRequestMessage);

  return chatHistory;
};
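As a worked illustration of the linked-issue parsing above, this is how the regex and the `slice`/`split` branches resolve the two reference styles the function accepts (the body string and the `toIssueNumber` helper are invented for this example):

```ts
const regex = /(#(\d+)|https:\/\/github\.com\/[^/\s]+\/[^/\s]+\/(issues|pull)\/(\d+))/gi;

const body = "Resolves #123, see also https://github.com/owner/repo/issues/456";
const refs = body.match(regex) ?? []; // ["#123", "https://github.com/owner/repo/issues/456"]

const toIssueNumber = (ref: string): number =>
  ref.includes("#")
    ? Number(ref.slice(1)) // "#123" -> 123
    : Number(ref.split("/")[6]); // ["https:", "", "github.com", "owner", "repo", "issues", "456"][6] -> 456

console.log(refs.map(toIssueNumber)); // [123, 456]
```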

/**
 * @notice best used alongside getAllLinkedIssuesAndPullsInBody() in helpers/issue
 * @param chatHistory the conversational context to provide to GPT

@@ -74,12 +166,12 @@ export const decideContextGPT = async (
  const issue = payload.issue;

  if (!issue) {
    return `Payload issue is undefined`;
    return ErrorDiff(`Payload issue is undefined.`);
  }

  // standard comments
  const comments = await getAllIssueComments(issue.number);
  // raw so we can grab the <!--- { 'UbiquityAI': 'answer' } ---> tag
  // raw so we can grab the <!--- { 'OpenAI': 'answer' } ---> tag
  const commentsRaw = await getAllIssueComments(issue.number, "raw");

  if (!comments) {

@@ -95,7 +187,7 @@ export const decideContextGPT = async (

  // add the rest
  comments.forEach(async (comment, i) => {
    if (comment.user.type == UserType.User || commentsRaw[i].body.includes("<!--- { 'UbiquityAI': 'answer' } --->")) {
    if (comment.user.type == UserType.User || commentsRaw[i].body.includes("<!--- { 'OpenAI': 'answer' } --->")) {
      streamlined.push({
        login: comment.user.login,
        body: comment.body,

@@ -108,7 +200,7 @@ export const decideContextGPT = async (

  if (typeof links === "string") {
    logger.info(`Error getting linked issues or prs: ${links}`);
    return `Error getting linked issues or prs: ${links}`;
    return ErrorDiff(`Error getting linked issues or prs: ${links}`);
  }

  linkedIssueStreamlined = links.linkedIssues;

@@ -117,23 +209,23 @@ export const decideContextGPT = async (
  chatHistory.push(
    {
      role: "system",
      content: gptContextTemplate,
    },
    {
      role: "assistant",
      content: "This issue/Pr context: \n" + JSON.stringify(streamlined),
      name: "UbiquityAI",
    } as CreateChatCompletionRequestMessage,
    {
      role: "system",
      role: "assistant",

> Comment: Why did you change this to assistant?
>
> Reply: It's hard to tell exactly; there is a lot of conflicting information from the community, but this follows the OpenAI docs. On the forum a lot of experimenting happens, like re-sending the system message as the most recent message after every response (if that makes sense). Some say the system message has the most weight while others say the user message does (I'd imagine it would be user, system, assistant), but I changed it according to the OpenAI docs so the context is treated as part of the conversation as opposed to trying to define the assistant.

      content: "Linked issue(s) context: \n" + JSON.stringify(linkedIssueStreamlined),
      name: "UbiquityAI",
    } as CreateChatCompletionRequestMessage,
    {
      role: "system",
      role: "assistant",
      content: "Linked Pr(s) context: \n" + JSON.stringify(linkedPRStreamlined),
      name: "UbiquityAI",
    } as CreateChatCompletionRequestMessage
  );

  // we'll use the first response to determine the context of future calls
  const res = await askGPT("", chatHistory);
  const res = await askGPT(`OpenAI fetching context for #${issue.number}`, chatHistory);

  return res;
};
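For reference, a minimal sketch of the message ordering discussed in the thread above, using the `openai` v4 client this file imports; the model name and message contents are placeholders. One system message defines behaviour, prior context is replayed as assistant turns, and the request itself arrives as the user turn.

```ts
import OpenAI from "openai";

const openai = new OpenAI(); // picks up OPENAI_API_KEY from the environment

const res = await openai.chat.completions.create({
  model: "gpt-3.5-turbo", // placeholder; whichever model askGPT is configured with
  messages: [
    { role: "system", content: "You are an AI designed to review and analyze pull requests." },
    { role: "assistant", content: "This issue/Pr context: \n" + JSON.stringify([{ login: "author", body: "..." }]) },
    { role: "user", content: "Does the linked diff satisfy the spec?" },
  ],
});

console.log(res.choices[0].message.content);
```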

> Comment: Do not use environment variables anymore, unless it is for sensitive information like keys.
>
> Reply: This only changed due to my IDE's format-on-save. There is a lot of process.env usage throughout the codebase; I know there is a refactor coming, so this can be looked at as part of #859 maybe?
>
> Reply: Looks like we're basically not using environment variables anymore at all, and instead are relying entirely on the yml configs.