Skip to content

Commit

Permalink
Merge pull request #57 from StampyAI/level-modes
Browse files Browse the repository at this point in the history
create level modes
  • Loading branch information
FraserLee committed Jul 12, 2023
2 parents 64a48c6 + 4792b63 commit 42052e4
Show file tree
Hide file tree
Showing 3 changed files with 77 additions and 12 deletions.
31 changes: 24 additions & 7 deletions api/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def cap(text: str, max_tokens: int) -> str:



def construct_prompt(query: str, history: List[Dict[str, str]], context: List[Block]) -> List[Dict[str, str]]:
def construct_prompt(query: str, mode: str, history: List[Dict[str, str]], context: List[Block]) -> List[Dict[str, str]]:

prompt = []

Expand Down Expand Up @@ -114,7 +114,24 @@ def construct_prompt(query: str, history: List[Dict[str, str]], context: List[Bl

question_prompt = "In your answer, please cite any claims you make back to each source " \
"using the format: [a], [b], etc. If you use multiple sources to make a claim " \
"cite all of them. For example: \"AGI is concerning [c, d, e].\"\n\nQ: " + query
"cite all of them. For example: \"AGI is concerning [c, d, e].\"\n\n"

if mode == "concise":
question_prompt += "Answer very concisely, getting to the crux of the matter in as " \
"few words as possible. Limit your answer to 1-2 sentences.\n\n"

elif mode == "rookie":
question_prompt += "This user is new to the field of AI Alignment and Safety - don't " \
"assume they know any technical terms or jargon. Still give a complete answer " \
"without patronizing the user, but take any extra time needed to " \
"explain new concepts or to illustrate your answer with examples. "\
"Put extra effort into explaining the intuition behind concepts " \
"rather than just giving a formal definition.\n\n"

elif mode != "default": raise ValueError("Invalid mode: " + mode)


question_prompt += "Q: " + query

prompt.append({"role": "user", "content": question_prompt})

Expand All @@ -124,7 +141,7 @@ def construct_prompt(query: str, history: List[Dict[str, str]], context: List[Bl
import time
import json

def talk_to_robot_internal(index, query: str, history: List[Dict[str, str]], k: int = STANDARD_K, log: Callable = print):
def talk_to_robot_internal(index, query: str, mode: str, history: List[Dict[str, str]], k: int = STANDARD_K, log: Callable = print):
try:
# 1. Find the most relevant blocks from the Alignment Research Dataset
yield {"state": "loading", "phase": "semantic"}
Expand All @@ -134,7 +151,7 @@ def talk_to_robot_internal(index, query: str, history: List[Dict[str, str]], k:

# 2. Generate a prompt
yield {"state": "loading", "phase": "prompt"}
prompt = construct_prompt(query, history, top_k_blocks)
prompt = construct_prompt(query, mode, history, top_k_blocks)

# 3. Count number of tokens left for completion (-50 for a buffer)
max_tokens_completion = NUM_TOKENS - sum([len(ENCODER.encode(message["content"]) + ENCODER.encode(message["role"])) for message in prompt]) - 50
Expand Down Expand Up @@ -187,14 +204,14 @@ def talk_to_robot_internal(index, query: str, history: List[Dict[str, str]], k:
yield {'state': 'error', 'error': str(e)}

# convert talk_to_robot_internal from dict generator into json generator
def talk_to_robot(index, query: str, history: List[Dict[str, str]], k: int = STANDARD_K, log: Callable = print):
yield from (json.dumps(block) for block in talk_to_robot_internal(index, query, history, k, log))
def talk_to_robot(index, query: str, mode: str, history: List[Dict[str, str]], k: int = STANDARD_K, log: Callable = print):
    """Stream the chat pipeline to the caller as JSON strings.

    Thin adapter over talk_to_robot_internal: every state dict yielded by
    the internal generator is serialized with json.dumps before being
    passed through, so HTTP streaming code can send it directly.
    """
    for state_block in talk_to_robot_internal(index, query, mode, history, k, log):
        yield json.dumps(state_block)

# wayyy simplified api
def talk_to_robot_simple(index, query: str, log: Callable = print):
res = {'response': ''}

for block in talk_to_robot_internal(index, query, [], log = log):
for block in talk_to_robot_internal(index, query, "default", [], log = log):
if block['state'] == 'loading' and block['phase'] == 'semantic' and 'citations' in block:
citations = {}
for i, c in enumerate(block['citations']):
Expand Down
3 changes: 2 additions & 1 deletion api/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,10 @@ def semantic():
def chat():
    """Streaming chat endpoint.

    Expects a JSON body with:
      - query:   the user's question (required)
      - mode:    answer style, one of "default", "concise", "rookie"
                 (optional; defaults to "default")
      - history: prior conversation turns (required)

    Returns a text/event-stream Response of JSON-encoded state blocks.
    """
    query = request.json['query']
    # Use .get with a default so clients that predate level modes (and
    # therefore send no "mode" key) keep working instead of hitting a
    # KeyError; construct_prompt still rejects unknown mode values.
    mode = request.json.get('mode', 'default')
    history = request.json['history']

    return Response(stream(talk_to_robot(PINECONE_INDEX, query, mode, history, log = log)), mimetype='text/event-stream')


# ------------- simplified non-streaming chat for internal testing -------------
Expand Down
55 changes: 51 additions & 4 deletions web/src/pages/index.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ const API_URL = process.env.NEXT_PUBLIC_API_URL || "http://127.0.0.1:3000";
import Head from "next/head";
import React from "react";
import { type NextPage } from "next";
import { useState } from "react";
import { useState, useEffect } from "react";
import Link from "next/link";
import Image from 'next/image';

Expand Down Expand Up @@ -242,6 +242,8 @@ type State = {
response: AssistantEntry;
};

type Mode = "rookie" | "concise" | "default";


// smooth-scroll to the bottom of the window if we're already less than 30% a screen away
// note: finicky interaction with "smooth" - maybe fix later.
Expand All @@ -256,6 +258,21 @@ const Home: NextPage = () => {
const [ runningIndex, setRunningIndex ] = useState(0);
const [ loadState, setLoadState ] = useState<State>({state: "idle"});

// [state, ready to save to localstorage]
const [ mode, setMode ] = useState<[Mode, boolean]>(["default", false]);

// store mode in localstorage
useEffect(() => {
if (mode[1]) localStorage.setItem("chat_mode", mode[0]);
}, [mode]);

// initial load
useEffect(() => {
const mode = localStorage.getItem("chat_mode") as Mode || "default";
setMode([mode, true]);
}, []);


const search = async (
query: string,
query_source: "search" | "followups",
Expand Down Expand Up @@ -287,7 +304,7 @@ const Home: NextPage = () => {
"Allow-Control-Allow-Origin": "*"
},

body: JSON.stringify({query: query, history:
body: JSON.stringify({query: query, mode: mode[0], history:
old_entries.filter((entry) => entry.role !== "error")
.map((entry) => {
return {
Expand Down Expand Up @@ -434,7 +451,7 @@ const Home: NextPage = () => {
const data = (await res.json()).data;

setEntries([...new_entries, {
role: "stampy",
role: "stampy",
content: data.text,
url: "https://aisafety.info/?state=" + data.pageid,
}]);
Expand All @@ -453,7 +470,7 @@ const Home: NextPage = () => {

enable((f_old: Followup[]) => {
const f_old_filtered = f_old.filter((f) => f.pageid !== data.pageid && !fpids.has(f.pageid));
return [...f_new, ...f_old_filtered].slice(0, MAX_FOLLOWUPS); // this is correct, it's N and not N-1 in javascript fsr
return [...f_new, ...f_old_filtered].slice(0, MAX_FOLLOWUPS); // this is correct, it's N and not N-1 in javascript fsr
});

scroll30();
Expand All @@ -467,8 +484,38 @@ const Home: NextPage = () => {
</Head>
<main>
<Header page="index" />
{/* three buttons for the three modes, place far right, 1rem between each */}
<div className="flex flex-row justify-center w-fit ml-auto mr-0 mb-5 gap-2">
<button className={
"border border-gray-300 px-1 " + (mode[1] && mode[0] === "rookie" ? "bg-gray-200" : "")
} onClick={() => { setMode(["rookie", true]); }}
title="For people who are new to the field of AI alignment. The
answer might be longer, since technical terms will be
explained in more detail and less background will be
assumed.">
rookie
</button>
//
<button className={
"border border-gray-300 px-1 " + (mode[1] && mode[0] === "concise" ? "bg-gray-200" : "")
} onClick={() => { setMode(["concise", true]); }}
title="Quick and to the point. Followup questions may need to be
asked to get the full picture of what's going on.">
concise
</button>
//
<button className={
"border border-gray-300 px-1 " + (mode[1] && mode[0] === "default" ? "bg-gray-200" : "")
} onClick={() => { setMode(["default", true]); }}
title="A balanced default mode.">
default
</button>
</div>


<h2 className="bg-red-100 text-red-800"><b>WARNING</b>: This is a very <b>early prototype</b> using data through June 2022. <Link href="http://bit.ly/stampy-chat-issues" target="_blank">Feedback</Link> welcomed.</h2>


<ul>
{entries.map((entry, i) => {
switch (entry.role) {
Expand Down

0 comments on commit 42052e4

Please sign in to comment.