# Workflow file captured from the GitHub Actions run for PR #15
# ("fixes for multi-model serving").

# CI: build the llama.cpp-backed rLLM server (cpp-rllm) in release mode
# and publish the stripped binary as a workflow artifact.
name: rLLM with llama.cpp

on:
  push:
    branches: ["main"]
  pull_request:
    branches: ["main"]

env:
  # Force colored cargo output in CI logs.
  CARGO_TERM_COLOR: always

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      # submodules: true — llama.cpp is vendored as a git submodule.
      - uses: actions/checkout@v4
        with:
          submodules: true
      # NOTE(review): the scraped source showed a redacted "[email protected]" ref
      # here (web email-protection garbling); reconstructed as the ccache
      # action's v1.2 tag — confirm against the repository history.
      - uses: hendrikmuhs/ccache-action@v1.2
      - uses: Swatinem/rust-cache@v2
        with:
          # Keep the cargo cache even when the build step fails.
          cache-on-failure: true
      - name: Build cpp-rllm
        # --no-default-features: CPU-only llama.cpp build (no CUDA) on the
        # hosted runner.
        run: cargo build --verbose --release --no-default-features
        working-directory: cpp-rllm
      # Workspace target dir lives at the repo root, not under cpp-rllm/.
      - run: strip target/release/cpp-rllm
      - name: Artifact upload
        uses: actions/upload-artifact@v4
        with:
          name: cpp-rllm
          path: target/release/cpp-rllm