Update prompts.py
RCW2000 authored Mar 2, 2024
1 parent ab4a766 commit de9cee2
Showing 1 changed file with 2 additions and 11 deletions.
13 changes: 2 additions & 11 deletions helpers/prompts.py
@@ -1,10 +1,6 @@
import sys
from yachalk import chalk
sys.path.append("..")

import json
#import ollama.client as client

#beam try
import transformers
from transformers import BloomForCausalLM
@@ -14,7 +10,7 @@
bmodel = BloomForCausalLM.from_pretrained("bigscience/bloom-1b3")
tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-1b3")

def extractConcepts(prompt: str, metadata={}, model="mistral-openorca:latest"):
def extractConcepts(prompt: str):
    SYS_PROMPT = (
        "Your task is to extract the key concepts (and non-personal entities) mentioned in the given context. "
        "Extract only the most important and atomistic concepts; if needed, break the concepts down into simpler concepts."
@@ -36,7 +32,6 @@ def extractConcepts(prompt: str, metadata={}, model="mistral-openorca:latest"):
    response=tokenizer.decode(bmodel.generate(inputs["input_ids"],
                              max_length=result_size
                              )[0])
    #response, _ = client.generate(model_name=model, system=SYS_PROMPT, prompt=prompt)
    try:
        result = json.loads(response)
        result = [dict(item, **metadata) for item in result]
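The generate call in the hunk above reads an inputs batch and a result_size cap whose definitions are collapsed in this diff view. Below is a minimal sketch of how that generation step typically looks with the transformers API; the helper name generate_json_text, the prompt wiring, and the default cap of 1024 are assumptions for illustration, not part of this commit.

from transformers import BloomForCausalLM, BloomTokenizerFast

# Model id as used in the diff.
bmodel = BloomForCausalLM.from_pretrained("bigscience/bloom-1b3")
tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-1b3")

def generate_json_text(system_prompt: str, user_prompt: str, result_size: int = 1024) -> str:
    # Tokenize the combined prompt into input_ids for Bloom (hypothetical helper).
    inputs = tokenizer(system_prompt + "\n" + user_prompt, return_tensors="pt")
    # max_length counts the prompt tokens as well as the newly generated ones.
    output_ids = bmodel.generate(inputs["input_ids"], max_length=result_size)[0]
    # Decode back to text; the calling code then attempts json.loads on this string.
    return tokenizer.decode(output_ids, skip_special_tokens=True)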
@@ -46,10 +41,7 @@ def extractConcepts(prompt: str, metadata={}, model="mistral-openorca:latest"):
    return result


def graphPrompt(input: str, metadata={}, model="mistral-openorca:latest"):
    if model == None:
        model = "mistral-openorca:latest"

def graphPrompt(input: str):
    # model_info = client.show(model_name=model)
    # print( chalk.blue(model_info))

@@ -84,7 +76,6 @@ def graphPrompt(input: str, metadata={}, model="mistral-openorca:latest"):
                              max_length=result_size
                              )[0])
    print(response)
    #response, _ = client.generate(model_name=model, system=SYS_PROMPT, prompt=USER_PROMPT)
    try:
        result = json.loads(response)
        result = [dict(item, **metadata) for item in result]
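After this commit, both helpers take only the text to process. A minimal usage sketch, assuming helpers is importable as a package and that the calling pipeline (not shown in this commit) supplies plain text chunks; the chunk string is a hypothetical input:

from helpers.prompts import extractConcepts, graphPrompt

chunk = "Sample passage of text from one document chunk."  # hypothetical input
concepts = extractConcepts(chunk)  # list of concept dicts parsed from the model's JSON output
triples = graphPrompt(chunk)       # list of relationship dicts parsed from the model's JSON output

Note that both function bodies still merge a metadata dict into each parsed item (the result = [dict(item, **metadata) for item in result] lines, unchanged by this commit), so this sketch assumes metadata remains defined somewhere inside prompts.py.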
