Commit 074b019 (1 parent: cf32b54)
Showing 5 changed files with 75 additions and 1 deletion.
.gitignore

```diff
@@ -1,4 +1,5 @@
 src/ollama/ollama
+venv/
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
```
@@ -0,0 +1,15 @@ (new file)
Fine-tune models with a private dataset

- Mistral format: JSONL, one JSON object per line (see the sketch after this list)
  - {"text":"<s>[INST] Instruction[/INST] Model answer</s>[INST] Follow-up instruction[/INST]"}
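A minimal sketch of producing training data in this format; the instruction/answer pairs and the train.jsonl path are hypothetical placeholders, not part of this commit:

```python
import json

# Hypothetical pairs; replace with your private dataset.
pairs = [
    ("What is JSONL?", "JSON Lines: one JSON object per line of a text file."),
    ("What does </s> mark?", "The end of a model turn in the Mistral format."),
]

# Write one {"text": ...} object per line, matching the format above.
with open("train.jsonl", "w", encoding="utf-8") as f:
    for instruction, answer in pairs:
        text = f"<s>[INST] {instruction}[/INST] {answer}</s>"
        f.write(json.dumps({"text": text}) + "\n")
```

Follow-up turns, if any, are appended to the same "text" value as additional [INST] ... [/INST] blocks, as in the format example above.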
- Install
  - pip install langchain
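Note: the scripts in this commit import langchain_community, which recent LangChain releases ship as a separate langchain-community package; depending on your version you may also need pip install langchain-community.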
Reference

- https://github.com/SkunkworksAI/BakLLaVA
- https://python.langchain.com/docs/integrations/llms/ollama
- https://www.datacamp.com/tutorial/fine-tuning-llama-2
- https://apeatling.com/articles/part-2-building-your-training-data-for-fine-tuning/
- https://jsonlines.org/
@@ -0,0 +1,45 @@ (new file)

```python
import base64
from io import BytesIO

from IPython.display import HTML, display
from langchain_community.llms import Ollama
from PIL import Image

bakllava = Ollama(model="bakllava")


def convert_to_base64(pil_image):
    """
    Convert PIL images to Base64 encoded strings
    :param pil_image: PIL image
    :return: Base64 encoded string
    """
    buffered = BytesIO()
    pil_image.save(buffered, format="JPEG")  # You can change the format if needed
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str


def plt_img_base64(img_base64):
    """
    Display base64 encoded string as image
    :param img_base64: Base64 string
    """
    # Create an HTML img tag with the base64 string as the source
    image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
    # Display the image by rendering the HTML
    display(HTML(image_html))


file_path = "../../../static/img/ollama_example_img.jpg"
pil_image = Image.open(file_path)
image_b64 = convert_to_base64(pil_image)
plt_img_base64(image_b64)

llm_with_image_context = bakllava.bind(images=[image_b64])
print(llm_with_image_context.invoke("What is the dollar based gross retention rate:"))
```
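The .bind(images=[image_b64]) call pre-attaches the image so every later invoke on the returned object carries it as context. Since bind only pre-fills keyword arguments, an equivalent one-off call should also work; a sketch, reusing bakllava and image_b64 from the script above:

```python
# One-off alternative: pass the image per call instead of pre-binding it.
answer = bakllava.invoke(
    "What is the dollar based gross retention rate:",
    images=[image_b64],
)
print(answer)
```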
@@ -1,3 +1,15 @@

```python
print("Welcome to Bhoomi")

from langchain_community.llms import Ollama

llm = Ollama(model="mistral")

# test 1
# llm.invoke("Tell me a joke")

# test 2
query = "Tell me a joke"

for chunks in llm.stream(query):
    print(chunks)
```
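llm.stream(query) yields the response incrementally as the model generates it, while the commented-out llm.invoke blocks until the full reply is ready. Each print(chunks) call above adds a newline per chunk; a cosmetic variant that renders the stream as one continuous line, reusing the same llm object:

```python
for chunk in llm.stream("Tell me a joke"):
    print(chunk, end="", flush=True)  # no newline between chunks
print()  # final newline
```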