diff --git a/src/continue-dev/config.json b/src/continue-dev/config.json
index f2fb4a0..14b92e2 100644
--- a/src/continue-dev/config.json
+++ b/src/continue-dev/config.json
@@ -5,6 +5,12 @@
       "provider": "ollama",
       "model": "mistral:latest",
       "apiBase": "http://localhost:11434/"
+    },
+    {
+      "title": "Gemma2B",
+      "provider": "ollama",
+      "model": "gemma:2b",
+      "apiBase": "http://localhost:11434/"
     }
   ],
   "slashCommands": [
@@ -63,7 +69,7 @@
     "provider": "ollama",
     "model": "mistral:latest"
   },
-  "allowAnonymousTelemetry": true,
+  "allowAnonymousTelemetry": false,
   "embeddingsProvider": {
     "provider": "transformers.js"
   }
diff --git a/src/ollama/bhoomi/json_output.py b/src/ollama/bhoomi/json_output.py
new file mode 100644
index 0000000..992d99e
--- /dev/null
+++ b/src/ollama/bhoomi/json_output.py
@@ -0,0 +1,41 @@
+import json
+
+from langchain_community.chat_models import ChatOllama
+from langchain_core.messages import HumanMessage
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import ChatPromptTemplate
+
+json_schema = {
+    "title": "Person",
+    "description": "Identifying information about a person.",
+    "type": "object",
+    "properties": {
+        "name": {"title": "Name", "description": "The person's name", "type": "string"},
+        "age": {"title": "Age", "description": "The person's age", "type": "integer"},
+        "fav_food": {
+            "title": "Fav Food",
+            "description": "The person's favorite food",
+            "type": "string",
+        },
+    },
+    "required": ["name", "age"],
+}
+
+llm = ChatOllama(model="gemma:2b")
+
+messages = [
+    HumanMessage(
+        content="Please tell me about a person using the following JSON schema:"
+    ),
+    HumanMessage(content="{dumps}"),
+    HumanMessage(
+        content="Now, considering the schema, tell me about a person named Sachin who is 32 years old and loves Biryani."
+    ),
+]
+
+prompt = ChatPromptTemplate.from_messages(messages)
+dumps = json.dumps(json_schema, indent=2)
+
+chain = prompt | llm | StrOutputParser()
+
+print(chain.invoke({"dumps": dumps}))
\ No newline at end of file
diff --git a/src/ollama/bhoomi/main.py b/src/ollama/bhoomi/main.py
index afd052f..1859930 100644
--- a/src/ollama/bhoomi/main.py
+++ b/src/ollama/bhoomi/main.py
@@ -2,14 +2,19 @@
 
 from langchain_community.llms import Ollama
 
-llm = Ollama(model="mistral")
+llm = Ollama(model="gemma:2b")
 
 
 # test 1
 # llm.invoke("Tell me a joke")
 
 # test 2
-query = "Tell me a joke"
+query = "What are the top 3 important facilities required for a Robot to survive on Mars?"
 
-for chunks in llm.stream(query):
+query2 = "What top 5 Sensors are necessary for Robots on Mars?"
+
+query3 = "Write a python program to connect the 5 sensors"
+
+
+for chunks in llm.stream(query2):
     print(chunks)
\ No newline at end of file