# Captured from the GitHub Actions UI ("Workflow file for this run")
# for pull request "feat: Draft ollama test" (#23).
# CI workflow: run the simple Python example against a local Ollama instance.
# The Ollama SERVER runs in the `services` container (published on 11434);
# the host-side install below is used only as a CLI client to pull models.
name: test | ollama
on:  # yamllint disable-line rule:truthy
  workflow_dispatch:
  pull_request:
    types: [labeled, synchronize]

jobs:
  run_simple_example_test:
    runs-on: ubuntu-latest
    services:
      # Ollama API server; reachable from steps at http://localhost:11434.
      ollama:
        image: ollama/ollama
        ports:
          - "11434:11434"
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12.x'

      - name: Install Poetry
        uses: snok/[email protected]
        with:
          virtualenvs-create: true
          virtualenvs-in-project: true
          installer-parallel: true

      - name: Install dependencies
        run: poetry install --no-interaction --all-extras

      # CLI client only. Do NOT run `ollama serve` here: port 11434 is
      # already bound by the service container, so a host serve cannot bind.
      - name: Install ollama CLI
        run: curl -fsSL https://ollama.com/install.sh | sh

      # Must come BEFORE any pull/API call — the service container may still
      # be starting when the job's steps begin.
      - name: Wait for Ollama to be ready
        run: |
          for i in {1..30}; do
            if curl -s http://localhost:11434/api/tags > /dev/null; then
              echo "Ollama is ready"
              exit 0
            fi
            echo "Waiting for Ollama... attempt $i"
            sleep 2
          done
          echo "Ollama failed to start"
          exit 1

      # Sequential foreground pulls: backgrounding them with `&` raced the
      # smoke-test call below and could leave the step "green" on failure.
      - name: Pull models
        run: |
          ollama pull llama3.2
          ollama pull avr/sfr-embedding-mistral:latest

      # Smoke test: a single generate call proves the model is usable.
      - name: Call ollama API
        run: |
          curl -d '{"model": "llama3.2", "stream": false, "prompt":"Whatever I say, answer with Yes"}' http://localhost:11434/api/generate

      - name: Dump Docker logs
        if: always()  # keep server logs for debugging even when a step failed
        run: |
          docker ps
          docker logs $(docker ps --filter "ancestor=ollama/ollama" --format "{{.ID}}")

      - name: Run example test
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          GRAPHISTRY_USERNAME: ${{ secrets.GRAPHISTRY_USERNAME }}
          GRAPHISTRY_PASSWORD: ${{ secrets.GRAPHISTRY_PASSWORD }}
          PYTHONFAULTHANDLER: "1"
          LLM_API_KEY: "ollama"
          LLM_PROVIDER: "ollama"
          LLM_ENDPOINT: "http://127.0.0.1:11434/v1/chat/completions"
          LLM_MODEL: "ollama/llama3.2"
          EMBEDDING_PROVIDER: "ollama"
          EMBEDDING_MODEL: "avr/sfr-embedding-mistral:latest"
          EMBEDDING_ENDPOINT: "http://127.0.0.1:11434/api/embeddings"
          EMBEDDING_DIMENSIONS: "4096"
          HUGGINGFACE_TOKENIZER: "Salesforce/SFR-Embedding-Mistral"
        run: poetry run python ./examples/python/simple_example.py