feat(wren-ai-service): litellm integration #1707


# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
name: AI Service Test
on:
  push:
    branches:
      - main
    paths:
      - "wren-ai-service/**"
  pull_request:
    types: [synchronize, labeled]
  workflow_dispatch:
permissions:
  contents: read
concurrency:
  # avoid mis-canceling CI runs when other labels are added to the PR, so the label name is part of the group key
  group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.event.label.name == 'ci/ai-service' && github.event.number || github.sha }}
  cancel-in-progress: true
defaults:
  run:
    working-directory: wren-ai-service
jobs:
  pytest:
    if: ${{ contains(github.event.pull_request.labels.*.name, 'ci/ai-service') || github.event_name == 'push' }}
    runs-on: ubuntu-22.04
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v4
      - name: Install Poetry
        uses: abatilo/actions-poetry@v3
        with:
          poetry-version: "1.8.3"
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version-file: ./wren-ai-service/pyproject.toml
          cache: "poetry"
      - name: Install the project dependencies
        run: poetry install
      - name: Install Just
        uses: extractions/setup-just@v2
        with:
          just-version: "1.36.0"
      - name: Prepare testing environment and Run tests
        run: |
          just test
        env:
          ENV: dev
          LLM_PROVIDER: openai_llm
          EMBEDDER_PROVIDER: openai_embedder
          DOCUMENT_STORE_PROVIDER: qdrant
          LLM_OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          EMBEDDER_OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          GENERATION_MODEL: gpt-4o-mini
          WREN_ENGINE_ENDPOINT: http://localhost:8080
          WREN_UI_ENDPOINT: http://localhost:3000
          QDRANT_HOST: localhost
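
For reference, the final test step can be approximated locally. The following is a minimal sketch that mirrors the workflow's env block and the same "just test" recipe it calls; it assumes Poetry, Just, and an OpenAI API key are available, and that any services the tests reach (Qdrant, the Wren engine, the Wren UI) are running on the localhost endpoints listed above.

    cd wren-ai-service
    poetry install
    export ENV=dev
    export LLM_PROVIDER=openai_llm
    export EMBEDDER_PROVIDER=openai_embedder
    export DOCUMENT_STORE_PROVIDER=qdrant
    export LLM_OPENAI_API_KEY=<your-openai-api-key>   # placeholder; CI injects the OPENAI_API_KEY secret
    export EMBEDDER_OPENAI_API_KEY=$LLM_OPENAI_API_KEY
    export GENERATION_MODEL=gpt-4o-mini
    export WREN_ENGINE_ENDPOINT=http://localhost:8080
    export WREN_UI_ENDPOINT=http://localhost:3000
    export QDRANT_HOST=localhost
    just test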