diff --git a/.gitignore b/.gitignore
index 94749ee..e9358d8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
src/ollama/ollama
+venv/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
diff --git a/README.md b/README.md
index 067a4e6..b99e9b5 100644
--- a/README.md
+++ b/README.md
@@ -6,4 +6,5 @@ Preferred only MIT sources and open-weights
- v0
- ollama + open-webui + mistral-7B + docker
- - vscodium + continue + ollama + mistral-7B
\ No newline at end of file
+ - vscodium + continue + ollama + mistral-7B
+ - python + langchain
\ No newline at end of file
diff --git a/docs/tune-models.md b/docs/tune-models.md
new file mode 100644
index 0000000..35a52ad
--- /dev/null
+++ b/docs/tune-models.md
@@ -0,0 +1,15 @@
+Fine tune models with Private Dataset
+
+- Mistral Format - JSONL
+- {"text":"[INST] Instruction[/INST] Model answer[INST] Follow-up instruction[/INST]"}
+
+
+- Install
+ - pip install langchain
+
+Reference
+- https://github.com/SkunkworksAI/BakLLaVA
+- https://python.langchain.com/docs/integrations/llms/ollama
+- https://www.datacamp.com/tutorial/fine-tuning-llama-2
+- https://apeatling.com/articles/part-2-building-your-training-data-for-fine-tuning/
+- https://jsonlines.org/
\ No newline at end of file
diff --git a/src/ollama/bhoomi/llm_image.py b/src/ollama/bhoomi/llm_image.py
new file mode 100644
index 0000000..1a333fc
--- /dev/null
+++ b/src/ollama/bhoomi/llm_image.py
@@ -0,0 +1,45 @@
+from langchain_community.llms import Ollama
+
+bakllava = Ollama(model="bakllava")
+
+
+import base64
+from io import BytesIO
+
+from IPython.display import HTML, display
+from PIL import Image
+
+
+def convert_to_base64(pil_image):
+ """
+ Convert PIL images to Base64 encoded strings
+
+ :param pil_image: PIL image
+ :return: Re-sized Base64 string
+ """
+
+ buffered = BytesIO()
+ pil_image.save(buffered, format="JPEG") # You can change the format if needed
+ img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+ return img_str
+
+
+def plt_img_base64(img_base64):
+ """
+ Display base64 encoded string as image
+
+ :param img_base64: Base64 string
+ """
+ # Create an HTML img tag with the base64 string as the source
+    image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
+ # Display the image by rendering the HTML
+ display(HTML(image_html))
+
+
+file_path = "../../../static/img/ollama_example_img.jpg"
+pil_image = Image.open(file_path)
+image_b64 = convert_to_base64(pil_image)
+plt_img_base64(image_b64)
+
+llm_with_image_context = bakllava.bind(images=[image_b64])
+llm_with_image_context.invoke("What is the dollar based gross retention rate:")
\ No newline at end of file
diff --git a/src/ollama/bhoomi/main.py b/src/ollama/bhoomi/main.py
index 6bf2939..afd052f 100644
--- a/src/ollama/bhoomi/main.py
+++ b/src/ollama/bhoomi/main.py
@@ -1,3 +1,15 @@
print("Welcome to Bhoomi")
+from langchain_community.llms import Ollama
+llm = Ollama(model="mistral")
+
+# test 1
+
+# llm.invoke("Tell me a joke")
+
+# test 2
+query = "Tell me a joke"
+
+for chunks in llm.stream(query):
+ print(chunks)
\ No newline at end of file