Commit: test with ollama

sachinsshetty committed Mar 17, 2024
1 parent cf32b54 commit 074b019
Showing 5 changed files with 75 additions and 1 deletion.
1 change: 1 addition & 0 deletions .gitignore
@@ -1,4 +1,5 @@
src/ollama/ollama
venv/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
3 changes: 2 additions & 1 deletion README.md
@@ -6,4 +6,5 @@ Preferred only MIT sources and open-weights

- v0
- ollama + open-webui + mistral-7B + docker
- vscodium + continue + ollama + mistral-7B
- python + langchain
15 changes: 15 additions & 0 deletions docs/tune-nodels.md
@@ -0,0 +1,15 @@
Fine-tune models with a private dataset

- Mistral fine-tuning format - JSONL (see the sketch after the references below)
- {"text":"<s>[INST] Instruction[/INST] Model answer</s>[INST] Follow-up instruction[/INST]"}


- Install
- pip install langchain

Reference
- https://github.com/SkunkworksAI/BakLLaVA
- https://python.langchain.com/docs/integrations/llms/ollama
- https://www.datacamp.com/tutorial/fine-tuning-llama-2
- https://apeatling.com/articles/part-2-building-your-training-data-for-fine-tuning/
- https://jsonlines.org/
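
As a supplement to the Mistral JSONL format noted above (not part of this commit), here is a minimal Python sketch of writing training examples to a JSONL file; the instruction/answer pairs and the train.jsonl filename are illustrative assumptions, not values from the repository.

import json

# Hypothetical instruction/answer pairs; replace with your private dataset
examples = [
    {"instruction": "Summarize the meeting notes", "answer": "The team agreed to ship v0 next week."},
    {"instruction": "Translate 'hello' to French", "answer": "Bonjour"},
]

# Each JSONL line wraps one exchange in Mistral's <s>[INST] ... [/INST] tags
with open("train.jsonl", "w", encoding="utf-8") as f:
    for ex in examples:
        text = f"<s>[INST] {ex['instruction']}[/INST] {ex['answer']}</s>"
        f.write(json.dumps({"text": text}) + "\n")

Each resulting line is a standalone JSON object, matching the jsonlines.org convention referenced above.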
45 changes: 45 additions & 0 deletions src/ollama/bhoomi/llm_image.py
@@ -0,0 +1,45 @@
from langchain_community.llms import Ollama

bakllava = Ollama(model="bakllava")


import base64
from io import BytesIO

from IPython.display import HTML, display
from PIL import Image


def convert_to_base64(pil_image):
    """
    Convert PIL images to Base64 encoded strings
    :param pil_image: PIL image
    :return: Base64-encoded string
    """

    buffered = BytesIO()
    pil_image.save(buffered, format="JPEG")  # You can change the format if needed
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str


def plt_img_base64(img_base64):
    """
    Display base64 encoded string as image
    :param img_base64: Base64 string
    """
    # Create an HTML img tag with the base64 string as the source
    image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
    # Display the image by rendering the HTML
    display(HTML(image_html))


# Load the example image, encode it to base64, and preview it inline
file_path = "../../../static/img/ollama_example_img.jpg"
pil_image = Image.open(file_path)
image_b64 = convert_to_base64(pil_image)
plt_img_base64(image_b64)

# Bind the image to the model and ask a question about its contents
llm_with_image_context = bakllava.bind(images=[image_b64])
print(llm_with_image_context.invoke("What is the dollar based gross retention rate:"))
12 changes: 12 additions & 0 deletions src/ollama/bhoomi/main.py
@@ -1,3 +1,15 @@
print("Welcome to Bhoomi")

from langchain_community.llms import Ollama

llm = Ollama(model="mistral")

# test 1: single blocking call

# llm.invoke("Tell me a joke")

# test 2: stream the response chunk by chunk
query = "Tell me a joke"

for chunk in llm.stream(query):
    print(chunk)
