Commit
examples for downloading data and for testing the model
1 parent ce8b00b, commit 5614507
Showing 3 changed files with 328 additions and 1 deletion.
@@ -0,0 +1,159 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "f1cd554b-2318-41d1-969f-46d7cafd1b40",
"metadata": {},
"source": [
"# Testing the model\n",
"Here we just test our fine-tuned model."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "be1df990-5cbf-4f3e-9059-9fae25f8d3ae",
"metadata": {},
"outputs": [],
"source": [
"def prompt_hf(request, model=\"haesleinhuepf/gemma-2b-it-bia-proof-of-concept2\"):\n",
"    global prompt_hf\n",
"    import transformers\n",
"    import torch\n",
"    \n",
"    if prompt_hf._pipeline is None: \n",
"        prompt_hf._pipeline = transformers.pipeline(\n",
"            \"text-generation\", model=model, model_kwargs={\"torch_dtype\": torch.bfloat16}, device_map=\"auto\",\n",
"            max_new_tokens=200\n",
"        )\n",
"    \n",
"    return prompt_hf._pipeline(request)[0]['generated_text']\n",
"prompt_hf._pipeline = None"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "958d16ef-f3fa-412f-be38-76101d63a2e5",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "ade3065e46914ab5b27eac86d175a10a",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading shards: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"`config.hidden_act` is ignored, you should use `config.hidden_activation` instead.\n",
"Gemma's activation function will be set to `gelu_pytorch_tanh`. Please, use\n",
"`config.hidden_activation` if you want to override this behaviour.\n",
"See https://github.com/huggingface/transformers/pull/29402 for more details.\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "adc6bf6e815348649fd315678923a671",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Some parameters are on the meta device device because they were offloaded to the cpu.\n",
"C:\\Users\\rober\\miniconda3\\envs\\genai-gpu\\Lib\\site-packages\\transformers\\models\\gemma\\modeling_gemma.py:482: UserWarning: 1Torch was not compiled with flash attention. (Triggered internally at C:\\cb\\pytorch_1000000000000\\work\\aten\\src\\ATen\\native\\transformers\\cuda\\sdp_utils.cpp:555.)\n",
" attn_output = torch.nn.functional.scaled_dot_product_attention(\n"
]
},
{
"data": {
"text/markdown": [
"Write Python code for cropping an image in X and Y to coordinates 10-20 and 30-50 respectively.\n",
"\n",
"```python\n",
"import cv2\n",
"\n",
"# Load the image\n",
"image = cv2.imread(\"image.jpg\")\n",
"\n",
"# Crop the image\n",
"cropped_image = image[10:20, 30:50]\n",
"\n",
"# Save the cropped image\n",
"cv2.imwrite(\"cropped_image.jpg\", cropped_image)\n",
"```\n",
"\n",
"**Explanation:**\n",
"\n",
"* `cv2.imread(\"image.jpg\")` loads the image from the file \"image.jpg\".\n",
"* `image[10:20, 30:50]` crops the image by specifying the coordinates of the top-left and bottom-right corners of the crop.\n",
"* `cv2.imwrite(\"cropped_image.jpg\", cropped_image)` saves the cropped image to the file \"cropped_image.jpg\".\n",
"\n",
"**Note:**\n",
"\n",
"* The `[10:20, 30:50]` coordinates represent the height"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from IPython.display import Markdown, display\n",
"result = prompt_hf(\"Write Python code for cropping an image in X and Y to coordinates 10-20 and 30-50\")\n",
"display(Markdown(result))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0a04961-d213-47d2-a218-8eacd242a35d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
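
One detail worth noting about the generated answer above, whose explanation is cut off at "represent the height": NumPy-style image arrays, such as those returned by `cv2.imread`, are indexed as `[row, column]`, i.e. `[Y, X]`, so `image[10:20, 30:50]` crops Y 10-20 and X 30-50 rather than the requested X 10-20 and Y 30-50. A minimal sketch of the crop the prompt asks for, assuming a standard OpenCV/NumPy image and the same hypothetical file names as in the model output:

```python
import cv2

# Load the image as a NumPy array of shape (height, width, channels)
image = cv2.imread("image.jpg")

# NumPy indexing is [row, column] = [Y, X], so the Y range comes first:
# rows (Y) 30-50, columns (X) 10-20
cropped_image = image[30:50, 10:20]

# Save the cropped region
cv2.imwrite("cropped_image.jpg", cropped_image)
```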