diff --git a/.buildkite/release-pipeline.yaml b/.buildkite/release-pipeline.yaml index 2de6fceb0c3fe..93e118fb3eab8 100644 --- a/.buildkite/release-pipeline.yaml +++ b/.buildkite/release-pipeline.yaml @@ -39,19 +39,3 @@ steps: - "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7" - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.1.0 --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT --target vllm-openai --progress plain ." - "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT" - - - label: "Build and publish TPU release image" - depends_on: ~ - if: build.env("NIGHTLY") == "1" - agents: - queue: tpu_queue_postmerge - commands: - - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --tag vllm/vllm-tpu:nightly --tag vllm/vllm-tpu:$BUILDKITE_COMMIT --progress plain -f Dockerfile.tpu ." - - "docker push vllm/vllm-tpu:nightly" - - "docker push vllm/vllm-tpu:$BUILDKITE_COMMIT" - plugins: - - docker-login#v3.0.0: - username: vllm - password-env: DOCKERHUB_TOKEN - env: - DOCKER_BUILDKIT: "1" diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 8f57006214c88..bf0de3f69f14e 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -237,7 +237,7 @@ steps: source_file_dependencies: - vllm/lora - tests/lora - command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py --ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py + command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore lora/test_long_context.py lora/test_chatglm3_tp.py lora/test_llama_tp.py parallelism: 4 - label: "PyTorch Fullgraph Smoke Test" # 9min @@ -362,7 +362,6 @@ steps: - tests/models/embedding/vision_language - tests/models/encoder_decoder/vision_language commands: - - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git - pytest -v -s models/decoder_only/audio_language -m 'core_model or quant_model' - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'core_model or quant_model' - pytest -v -s models/embedding/vision_language -m core_model @@ -378,7 +377,6 @@ steps: - tests/models/embedding/vision_language - tests/models/encoder_decoder/vision_language commands: - - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git - pytest -v -s models/decoder_only/audio_language -m 'not core_model and not quant_model' # HACK - run phi3v tests separately to sidestep this transformers bug # https://github.com/huggingface/transformers/issues/34307 diff --git a/.github/workflows/lint-and-deploy.yaml b/.github/workflows/lint-and-deploy.yaml deleted file mode 100644 index ab6f6e5d2060d..0000000000000 --- a/.github/workflows/lint-and-deploy.yaml +++ /dev/null @@ -1,81 +0,0 @@ -name: Lint and Deploy Charts - -on: pull_request - -jobs: - lint-and-deploy: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - fetch-depth: 0 - - - name: Set up Helm - uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0 - with: - version: v3.14.4 - - #Python is required because ct lint runs Yamale and yamllint which require Python. 
- - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 - with: - python-version: '3.13' - - - name: Set up chart-testing - uses: helm/chart-testing-action@e6669bcd63d7cb57cb4380c33043eebe5d111992 # v2.6.1 - with: - version: v3.10.1 - - - name: Run chart-testing (lint) - run: ct lint --target-branch ${{ github.event.repository.default_branch }} --chart-dirs examples/chart-helm --charts examples/chart-helm - - - name: Setup minio - run: | - docker network create vllm-net - docker run -d -p 9000:9000 --name minio --net vllm-net \ - -e "MINIO_ACCESS_KEY=minioadmin" \ - -e "MINIO_SECRET_KEY=minioadmin" \ - -v /tmp/data:/data \ - -v /tmp/config:/root/.minio \ - minio/minio server /data - export AWS_ACCESS_KEY_ID=minioadmin - export AWS_SECRET_ACCESS_KEY=minioadmin - export AWS_EC2_METADATA_DISABLED=true - mkdir opt-125m - cd opt-125m && curl -O -Ls "https://huggingface.co/facebook/opt-125m/resolve/main/{pytorch_model.bin,config.json,generation_config.json,merges.txt,special_tokens_map.json,tokenizer_config.json,vocab.json}" && cd .. - aws --endpoint-url http://127.0.0.1:9000/ s3 mb s3://testbucket - aws --endpoint-url http://127.0.0.1:9000/ s3 cp opt-125m/ s3://testbucket/opt-125m --recursive - - - name: Create kind cluster - uses: helm/kind-action@0025e74a8c7512023d06dc019c617aa3cf561fde # v1.10.0 - - - name: Build the Docker image vllm cpu - run: docker buildx build -f Dockerfile.cpu -t vllm-cpu-env . - - - name: Configuration of docker images, network and namespace for the kind cluster - run: | - docker pull amazon/aws-cli:2.6.4 - kind load docker-image amazon/aws-cli:2.6.4 --name chart-testing - kind load docker-image vllm-cpu-env:latest --name chart-testing - docker network connect vllm-net "$(docker ps -aqf "name=chart-testing-control-plane")" - kubectl create ns ns-vllm - - - name: Run chart-testing (install) - run: | - export AWS_ACCESS_KEY_ID=minioadmin - export AWS_SECRET_ACCESS_KEY=minioadmin - helm install --wait --wait-for-jobs --timeout 5m0s --debug --create-namespace --namespace=ns-vllm test-vllm examples/chart-helm -f examples/chart-helm/values.yaml --set secrets.s3endpoint=http://minio:9000 --set secrets.s3bucketname=testbucket --set secrets.s3accesskeyid=$AWS_ACCESS_KEY_ID --set secrets.s3accesskey=$AWS_SECRET_ACCESS_KEY --set resources.requests.cpu=1 --set resources.requests.memory=4Gi --set resources.limits.cpu=2 --set resources.limits.memory=5Gi --set image.env[0].name=VLLM_CPU_KVCACHE_SPACE --set image.env[1].name=VLLM_LOGGING_LEVEL --set-string image.env[0].value="1" --set-string image.env[1].value="DEBUG" --set-string extraInit.s3modelpath="opt-125m/" --set-string 'resources.limits.nvidia\.com/gpu=0' --set-string 'resources.requests.nvidia\.com/gpu=0' --set-string image.repository="vllm-cpu-env" - - - name: curl test - run: | - kubectl -n ns-vllm port-forward service/test-vllm-service 8001:80 & - sleep 10 - CODE="$(curl -v -f --location http://localhost:8001/v1/completions \ - --header "Content-Type: application/json" \ - --data '{ - "model": "opt-125m", - "prompt": "San Francisco is a", - "max_tokens": 7, - "temperature": 0 - }'):$CODE" - echo "$CODE" \ No newline at end of file diff --git a/Dockerfile.neuron b/Dockerfile.neuron index 77162bc82de62..76dbd4c04d3f3 100644 --- a/Dockerfile.neuron +++ b/Dockerfile.neuron @@ -1,6 +1,5 @@ # default base image -# https://gallery.ecr.aws/neuron/pytorch-inference-neuronx -ARG BASE_IMAGE="public.ecr.aws/neuron/pytorch-inference-neuronx:2.1.2-neuronx-py310-sdk2.20.2-ubuntu20.04" +ARG 
BASE_IMAGE="public.ecr.aws/neuron/pytorch-inference-neuronx:2.1.2-neuronx-py310-sdk2.20.0-ubuntu20.04" FROM $BASE_IMAGE diff --git a/README.md b/README.md index 96c7903e3ce74..661d5ce48e8c6 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,6 @@ Easy, fast, and cheap LLM serving for everyone --- *Latest News* 🔥 -- [2024/12] vLLM joins [pytorch ecosystem](https://pytorch.org/blog/vllm-joins-pytorch)! Easy, Fast, and Cheap LLM Serving for Everyone! - [2024/11] We hosted [the seventh vLLM meetup](https://lu.ma/h0qvrajz) with Snowflake! Please find the meetup slides from vLLM team [here](https://docs.google.com/presentation/d/1e3CxQBV3JsfGp30SwyvS3eM_tW-ghOhJ9PAJGK6KR54/edit?usp=sharing), and Snowflake team [here](https://docs.google.com/presentation/d/1qF3RkDAbOULwz9WK5TOltt2fE9t6uIc_hVNLFAaQX6A/edit?usp=sharing). - [2024/10] We have just created a developer slack ([slack.vllm.ai](https://slack.vllm.ai)) focusing on coordinating contributions and discussing features. Please feel free to join us there! - [2024/10] Ray Summit 2024 held a special track for vLLM! Please find the opening talk slides from the vLLM team [here](https://docs.google.com/presentation/d/1B_KQxpHBTRa_mDF-tR6i8rWdOU5QoTZNcEg2MKZxEHM/edit?usp=sharing). Learn more from the [talks](https://www.youtube.com/playlist?list=PLzTswPQNepXl6AQwifuwUImLPFRVpksjR) from other vLLM contributors and users! diff --git a/csrc/attention/paged_attention_v1.cu b/csrc/attention/paged_attention_v1.cu index cb1a069942069..741cd0c82dc89 100644 --- a/csrc/attention/paged_attention_v1.cu +++ b/csrc/attention/paged_attention_v1.cu @@ -140,10 +140,13 @@ void paged_attention_v1_launcher( blocksparse_block_size, blocksparse_head_sliding_step); #define CALL_V1_LAUNCHER_SPARSITY(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE) \ - if (is_block_sparse) { \ - CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ - } else { \ - CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ + switch (is_block_sparse) { \ + case true: \ + CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ + break; \ + case false: \ + CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ + break; \ } // NOTE(woosuk): To reduce the compilation time, we omitted block sizes diff --git a/csrc/attention/paged_attention_v2.cu b/csrc/attention/paged_attention_v2.cu index c457bdb89008e..6de8d0bdd5b8d 100644 --- a/csrc/attention/paged_attention_v2.cu +++ b/csrc/attention/paged_attention_v2.cu @@ -147,10 +147,13 @@ void paged_attention_v2_launcher( blocksparse_head_sliding_step); #define CALL_V2_LAUNCHER_SPARSITY(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE) \ - if (is_block_sparse) { \ - CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ - } else { \ - CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ + switch (is_block_sparse) { \ + case true: \ + CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ + break; \ + case false: \ + CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ + break; \ } // NOTE(woosuk): To reduce the compilation time, we omitted block sizes diff --git a/csrc/cache_kernels.cu b/csrc/cache_kernels.cu index 8a95279f9a25a..1be806bbfa43c 100644 --- a/csrc/cache_kernels.cu +++ b/csrc/cache_kernels.cu @@ -307,20 +307,10 @@ void reshape_and_cache_flash( torch::Tensor& key_cache, // [num_blocks, block_size, num_heads, head_size] torch::Tensor& value_cache, // [num_blocks, block_size, num_heads, head_size] - torch::Tensor& slot_mapping, // [num_tokens] or [num_actual_tokens] 
+ torch::Tensor& slot_mapping, // [num_tokens] const std::string& kv_cache_dtype, const double k_scale, const double v_scale) { - // NOTE(woosuk): In vLLM V1, key.size(0) can be different from - // slot_mapping.size(0) because of padding for CUDA graphs. - // In vLLM V0, key.size(0) is always equal to slot_mapping.size(0) because - // both include padding. - // In vLLM V1, however, key.size(0) can be larger than slot_mapping.size(0) - // since key includes padding for CUDA graphs, while slot_mapping does not. - // In this case, slot_mapping.size(0) represents the actual number of tokens - // before padding. - // For compatibility with both cases, we use slot_mapping.size(0) as the - // number of tokens. - int num_tokens = slot_mapping.size(0); + int num_tokens = key.size(0); int num_heads = key.size(1); int head_size = key.size(2); int block_size = key_cache.size(1); diff --git a/csrc/mamba/causal_conv1d/causal_conv1d.cu b/csrc/mamba/causal_conv1d/causal_conv1d.cu index dd1e6de2e0180..498d069c05f0d 100644 --- a/csrc/mamba/causal_conv1d/causal_conv1d.cu +++ b/csrc/mamba/causal_conv1d/causal_conv1d.cu @@ -424,7 +424,7 @@ void causal_conv1d_fwd_kernel(ConvParamsBase params) { // and the one before it (chunk = n_chunks - 1 and chunk = n_chunks - 2), // (which occurs when `final_state_position` is a non-positivie index) // we load the correct data from smem_exchange from both chunks, the last chunk iteration and the one before it - if (conv_states != nullptr && final_state_position < 0 && seqlen > kWidth){ + if (final_state_position < 0 && seqlen > kWidth){ input_t vals_load[kNElts] = {0}; if ((chunk == n_chunks - 2) && (tidx == kNThreads - 1)){ // chunk = n_chunks - 2, a segment of the final state sits in the last index diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt index ca2da4cd66d2d..5c80645b405ae 100644 --- a/docs/requirements-docs.txt +++ b/docs/requirements-docs.txt @@ -16,6 +16,5 @@ mistral_common >= 1.5.0 aiohttp starlette openai # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args -fastapi # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args partial-json-parser # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args requests diff --git a/docs/source/index.rst b/docs/source/index.rst index ebf1361976c5e..86b1eed2d26ba 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -82,7 +82,6 @@ Documentation serving/openai_compatible_server serving/deploying_with_docker serving/deploying_with_k8s - serving/deploying_with_helm serving/deploying_with_nginx serving/distributed_serving serving/metrics @@ -103,7 +102,6 @@ Documentation usage/lora usage/multimodal_inputs - usage/tool_calling usage/structured_outputs usage/spec_decode usage/compatibility_matrix diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 4e5b10967e3bb..5b416e04da745 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -495,7 +495,7 @@ Text Generation --------------- .. 
list-table:: - :widths: 25 25 15 20 5 5 5 + :widths: 25 25 15 25 5 5 :header-rows: 1 * - Architecture @@ -504,168 +504,144 @@ Text Generation - Example HF Models - :ref:`LoRA ` - :ref:`PP ` - - V1 * - :code:`AriaForConditionalGeneration` - Aria - T + I - :code:`rhymes-ai/Aria` - - ✅︎ - - * - :code:`Blip2ForConditionalGeneration` - BLIP-2 - T + I\ :sup:`E` - :code:`Salesforce/blip2-opt-2.7b`, :code:`Salesforce/blip2-opt-6.7b`, etc. - - ✅︎ - - * - :code:`ChameleonForConditionalGeneration` - Chameleon - T + I - :code:`facebook/chameleon-7b` etc. - - ✅︎ - - * - :code:`FuyuForCausalLM` - Fuyu - T + I - :code:`adept/fuyu-8b` etc. - - ✅︎ - - * - :code:`ChatGLMModel` - GLM-4V - T + I - :code:`THUDM/glm-4v-9b` etc. - ✅︎ - ✅︎ - - * - :code:`H2OVLChatModel` - H2OVL - T + I\ :sup:`E+` - :code:`h2oai/h2ovl-mississippi-800m`, :code:`h2oai/h2ovl-mississippi-2b`, etc. - - ✅︎ - - * - :code:`Idefics3ForConditionalGeneration` - Idefics3 - T + I - :code:`HuggingFaceM4/Idefics3-8B-Llama3` etc. - ✅︎ - - - * - :code:`InternVLChatModel` - - InternVL 2.5, Mono-InternVL, InternVL 2.0 + - InternVL2 - T + I\ :sup:`E+` - - :code:`OpenGVLab/InternVL2_5-4B`, :code:`OpenGVLab/Mono-InternVL-2B`, :code:`OpenGVLab/InternVL2-4B`, etc. + - :code:`OpenGVLab/Mono-InternVL-2B`, :code:`OpenGVLab/InternVL2-4B`, :code:`OpenGVLab/InternVL2-8B`, etc. - - ✅︎ - - ✅︎ * - :code:`LlavaForConditionalGeneration` - LLaVA-1.5 - T + I\ :sup:`E+` - - :code:`llava-hf/llava-1.5-7b-hf`, :code:`TIGER-Lab/Mantis-8B-siglip-llama3` (see note), etc. + - :code:`llava-hf/llava-1.5-7b-hf`, :code:`llava-hf/llava-1.5-13b-hf`, etc. - - ✅︎ - - ✅︎ * - :code:`LlavaNextForConditionalGeneration` - LLaVA-NeXT - T + I\ :sup:`E+` - :code:`llava-hf/llava-v1.6-mistral-7b-hf`, :code:`llava-hf/llava-v1.6-vicuna-7b-hf`, etc. - - ✅︎ - - * - :code:`LlavaNextVideoForConditionalGeneration` - LLaVA-NeXT-Video - T + V - :code:`llava-hf/LLaVA-NeXT-Video-7B-hf`, etc. - - ✅︎ - - * - :code:`LlavaOnevisionForConditionalGeneration` - LLaVA-Onevision - T + I\ :sup:`+` + V\ :sup:`+` - :code:`llava-hf/llava-onevision-qwen2-7b-ov-hf`, :code:`llava-hf/llava-onevision-qwen2-0.5b-ov-hf`, etc. - - ✅︎ - - * - :code:`MiniCPMV` - MiniCPM-V - T + I\ :sup:`E+` - :code:`openbmb/MiniCPM-V-2` (see note), :code:`openbmb/MiniCPM-Llama3-V-2_5`, :code:`openbmb/MiniCPM-V-2_6`, etc. - ✅︎ - ✅︎ - - * - :code:`MllamaForConditionalGeneration` - Llama 3.2 - T + I\ :sup:`+` - :code:`meta-llama/Llama-3.2-90B-Vision-Instruct`, :code:`meta-llama/Llama-3.2-11B-Vision`, etc. - - - - * - :code:`MolmoForCausalLM` - Molmo - T + I - :code:`allenai/Molmo-7B-D-0924`, :code:`allenai/Molmo-72B-0924`, etc. - - ✅︎ - - ✅︎ * - :code:`NVLM_D_Model` - NVLM-D 1.0 - T + I\ :sup:`E+` - :code:`nvidia/NVLM-D-72B`, etc. - - ✅︎ - - ✅︎ * - :code:`PaliGemmaForConditionalGeneration` - PaliGemma - T + I\ :sup:`E` - :code:`google/paligemma-3b-pt-224`, :code:`google/paligemma-3b-mix-224`, etc. - - ✅︎ - - * - :code:`Phi3VForCausalLM` - Phi-3-Vision, Phi-3.5-Vision - T + I\ :sup:`E+` - :code:`microsoft/Phi-3-vision-128k-instruct`, :code:`microsoft/Phi-3.5-vision-instruct` etc. - - ✅︎ - - ✅︎ * - :code:`PixtralForConditionalGeneration` - Pixtral - T + I\ :sup:`+` - :code:`mistralai/Pixtral-12B-2409`, :code:`mistral-community/pixtral-12b` etc. - - ✅︎ - - ✅︎ * - :code:`QWenLMHeadModel` - Qwen-VL - T + I\ :sup:`E+` - :code:`Qwen/Qwen-VL`, :code:`Qwen/Qwen-VL-Chat`, etc. 
- ✅︎ - ✅︎ - - * - :code:`Qwen2AudioForConditionalGeneration` - Qwen2-Audio - T + A\ :sup:`+` - :code:`Qwen/Qwen2-Audio-7B-Instruct` - - ✅︎ - - * - :code:`Qwen2VLForConditionalGeneration` - Qwen2-VL - T + I\ :sup:`E+` + V\ :sup:`E+` - :code:`Qwen/Qwen2-VL-2B-Instruct`, :code:`Qwen/Qwen2-VL-7B-Instruct`, :code:`Qwen/Qwen2-VL-72B-Instruct`, etc. - ✅︎ - ✅︎ - - * - :code:`UltravoxModel` - Ultravox - T + A\ :sup:`E+` - :code:`fixie-ai/ultravox-v0_3` - - ✅︎ - - | :sup:`E` Pre-computed embeddings can be inputted for this modality. | :sup:`+` Multiple items can be inputted per text prompt for this modality. @@ -688,10 +664,6 @@ Text Generation .. note:: vLLM currently only supports adding LoRA to the language backbone of multimodal models. -.. note:: - To use :code:`TIGER-Lab/Mantis-8B-siglip-llama3`, you have to install their GitHub repo (:code:`pip install git+https://github.com/TIGER-AI-Lab/Mantis.git`) - and pass :code:`--hf_overrides '{"architectures": ["MantisForConditionalGeneration"]}'` when running vLLM. - .. note:: The official :code:`openbmb/MiniCPM-V-2` doesn't work yet, so we need to use a fork (:code:`HwwwH/MiniCPM-V-2`) for now. For more details, please see: https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630 diff --git a/docs/source/serving/architecture_helm_deployment.png b/docs/source/serving/architecture_helm_deployment.png deleted file mode 100644 index 8f9ca29795ffe..0000000000000 Binary files a/docs/source/serving/architecture_helm_deployment.png and /dev/null differ diff --git a/docs/source/serving/deploying_with_helm.rst b/docs/source/serving/deploying_with_helm.rst deleted file mode 100644 index 21b17e881b945..0000000000000 --- a/docs/source/serving/deploying_with_helm.rst +++ /dev/null @@ -1,253 +0,0 @@ -.. _deploying_with_helm: - -Deploying with Helm -=================== - -A Helm chart to deploy vLLM for Kubernetes - -Helm is a package manager for Kubernetes. It will help you to deploy vLLM on k8s and automate the deployment of vLLMm Kubernetes applications. With Helm, you can deploy the same framework architecture with different configurations to multiple namespaces by overriding variables values. - -This guide will walk you through the process of deploying vLLM with Helm, including the necessary prerequisites, steps for helm install and documentation on architecture and values file. - -Prerequisites -------------- -Before you begin, ensure that you have the following: - -- A running Kubernetes cluster -- NVIDIA Kubernetes Device Plugin (``k8s-device-plugin``): This can be found at `https://github.com/NVIDIA/k8s-device-plugin `__ -- Available GPU resources in your cluster -- S3 with the model which will be deployed - -Installing the chart --------------------- - -To install the chart with the release name ``test-vllm``: - -.. code-block:: console - - helm upgrade --install --create-namespace --namespace=ns-vllm test-vllm . -f values.yaml --set secrets.s3endpoint=$ACCESS_POINT --set secrets.s3buckername=$BUCKET --set secrets.s3accesskeyid=$ACCESS_KEY --set secrets.s3accesskey=$SECRET_KEY - -Uninstalling the Chart ----------------------- - -To uninstall the ``test-vllm`` deployment: - -.. code-block:: console - - helm uninstall test-vllm --namespace=ns-vllm - -The command removes all the Kubernetes components associated with the -chart **including persistent volumes** and deletes the release. - -Architecture ------------- - -.. image:: architecture_helm_deployment.png - -Values ------- - -.. 
list-table:: Values - :widths: 25 25 25 25 - :header-rows: 1 - - * - Key - - Type - - Default - - Description - * - autoscaling - - object - - {"enabled":false,"maxReplicas":100,"minReplicas":1,"targetCPUUtilizationPercentage":80} - - Autoscaling configuration - * - autoscaling.enabled - - bool - - false - - Enable autoscaling - * - autoscaling.maxReplicas - - int - - 100 - - Maximum replicas - * - autoscaling.minReplicas - - int - - 1 - - Minimum replicas - * - autoscaling.targetCPUUtilizationPercentage - - int - - 80 - - Target CPU utilization for autoscaling - * - configs - - object - - {} - - Configmap - * - containerPort - - int - - 8000 - - Container port - * - customObjects - - list - - [] - - Custom Objects configuration - * - deploymentStrategy - - object - - {} - - Deployment strategy configuration - * - externalConfigs - - list - - [] - - External configuration - * - extraContainers - - list - - [] - - Additional containers configuration - * - extraInit - - object - - {"pvcStorage":"1Gi","s3modelpath":"relative_s3_model_path/opt-125m", "awsEc2MetadataDisabled": true} - - Additional configuration for the init container - * - extraInit.pvcStorage - - string - - "50Gi" - - Storage size of the s3 - * - extraInit.s3modelpath - - string - - "relative_s3_model_path/opt-125m" - - Path of the model on the s3 which hosts model weights and config files - * - extraInit.awsEc2MetadataDisabled - - boolean - - true - - Disables the use of the Amazon EC2 instance metadata service - * - extraPorts - - list - - [] - - Additional ports configuration - * - gpuModels - - list - - ["TYPE_GPU_USED"] - - Type of gpu used - * - image - - object - - {"command":["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"],"repository":"vllm/vllm-openai","tag":"latest"} - - Image configuration - * - image.command - - list - - ["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"] - - Container launch command - * - image.repository - - string - - "vllm/vllm-openai" - - Image repository - * - image.tag - - string - - "latest" - - Image tag - * - livenessProbe - - object - - {"failureThreshold":3,"httpGet":{"path":"/health","port":8000},"initialDelaySeconds":15,"periodSeconds":10} - - Liveness probe configuration - * - livenessProbe.failureThreshold - - int - - 3 - - Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not alive - * - livenessProbe.httpGet - - object - - {"path":"/health","port":8000} - - Configuration of the Kubelet http request on the server - * - livenessProbe.httpGet.path - - string - - "/health" - - Path to access on the HTTP server - * - livenessProbe.httpGet.port - - int - - 8000 - - Name or number of the port to access on the container, on which the server is listening - * - livenessProbe.initialDelaySeconds - - int - - 15 - - Number of seconds after the container has started before liveness probe is initiated - * - livenessProbe.periodSeconds - - int - - 10 - - How often (in seconds) to perform the liveness probe - * - maxUnavailablePodDisruptionBudget - - string - - "" - - Disruption Budget Configuration - * - readinessProbe - - object - - {"failureThreshold":3,"httpGet":{"path":"/health","port":8000},"initialDelaySeconds":5,"periodSeconds":5} - - Readiness probe configuration - * - readinessProbe.failureThreshold - - int - - 3 - - Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has 
failed: the container is not ready - * - readinessProbe.httpGet - - object - - {"path":"/health","port":8000} - - Configuration of the Kubelet http request on the server - * - readinessProbe.httpGet.path - - string - - "/health" - - Path to access on the HTTP server - * - readinessProbe.httpGet.port - - int - - 8000 - - Name or number of the port to access on the container, on which the server is listening - * - readinessProbe.initialDelaySeconds - - int - - 5 - - Number of seconds after the container has started before readiness probe is initiated - * - readinessProbe.periodSeconds - - int - - 5 - - How often (in seconds) to perform the readiness probe - * - replicaCount - - int - - 1 - - Number of replicas - * - resources - - object - - {"limits":{"cpu":4,"memory":"16Gi","nvidia.com/gpu":1},"requests":{"cpu":4,"memory":"16Gi","nvidia.com/gpu":1}} - - Resource configuration - * - resources.limits."nvidia.com/gpu" - - int - - 1 - - Number of gpus used - * - resources.limits.cpu - - int - - 4 - - Number of CPUs - * - resources.limits.memory - - string - - "16Gi" - - CPU memory configuration - * - resources.requests."nvidia.com/gpu" - - int - - 1 - - Number of gpus used - * - resources.requests.cpu - - int - - 4 - - Number of CPUs - * - resources.requests.memory - - string - - "16Gi" - - CPU memory configuration - * - secrets - - object - - {} - - Secrets configuration - * - serviceName - - string - - - - Service name - * - servicePort - - int - - 80 - - Service port - * - labels.environment - - string - - test - - Environment name - * - labels.release - - string - - test - - Release name diff --git a/docs/source/serving/deploying_with_kubeai.rst b/docs/source/serving/deploying_with_kubeai.rst deleted file mode 100644 index ec3c065320fd9..0000000000000 --- a/docs/source/serving/deploying_with_kubeai.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _deploying_with_kubeai: - -Deploying with KubeAI -===================== - -`KubeAI `_ is a Kubernetes operator that enables you to deploy and manage AI models on Kubernetes. It provides a simple and scalable way to deploy vLLM in production. Functionality such as scale-from-zero, load based autoscaling, model caching, and much more is provided out of the box with zero external dependencies. - - -Please see the Installation Guides for environment specific instructions: - -* `Any Kubernetes Cluster `_ -* `EKS `_ -* `GKE `_ - -Once you have KubeAI installed, you can -`configure text generation models `_ -using vLLM. \ No newline at end of file diff --git a/docs/source/serving/integrations.rst b/docs/source/serving/integrations.rst index 0dd505a739863..f39997e0e44d9 100644 --- a/docs/source/serving/integrations.rst +++ b/docs/source/serving/integrations.rst @@ -6,7 +6,6 @@ Integrations run_on_sky deploying_with_kserve - deploying_with_kubeai deploying_with_triton deploying_with_bentoml deploying_with_cerebrium diff --git a/docs/source/serving/openai_compatible_server.md b/docs/source/serving/openai_compatible_server.md index f75653106cf66..d75e90807ca1d 100644 --- a/docs/source/serving/openai_compatible_server.md +++ b/docs/source/serving/openai_compatible_server.md @@ -361,3 +361,220 @@ $ vllm serve SOME_MODEL --config config.yaml **NOTE** In case an argument is supplied simultaneously using command line and the config file, the value from the commandline will take precedence. The order of priorities is `command line > config file values > defaults`. 
+ +--- + +## Tool calling in the chat completion API +vLLM currently supports named function calling, as well as the `auto` and `none` options for the `tool_choice` field in the chat completion API. The `tool_choice` option `required` is **not yet supported** but on the roadmap. + +It is the callers responsibility to prompt the model with the tool information, vLLM will not automatically manipulate the prompt. +Please see below for recommended configuration and chat templates to use when function calling is to be used with the different models. + + +### Named Function Calling +vLLM supports named function calling in the chat completion API by default. It does so using Outlines, so this is +enabled by default, and will work with any supported model. You are guaranteed a validly-parsable function call - not a +high-quality one. + +vLLM will use guided decoding to ensure the response matches the tool parameter object defined by the JSON schema in the `tools` parameter. + +To use a named function, you need to define the functions in the `tools` parameter of the chat completion request, and +specify the `name` of one of the tools in the `tool_choice` parameter of the chat completion request. + + +### Automatic Function Calling +To enable this feature, you should set the following flags: +* `--enable-auto-tool-choice` -- **mandatory** Auto tool choice. tells vLLM that you want to enable the model to generate its own tool calls when it +deems appropriate. +* `--tool-call-parser` -- select the tool parser to use (listed below). Additional tool parsers +will continue to be added in the future, and also can register your own tool parsers in the `--tool-parser-plugin`. +* `--tool-parser-plugin` -- **optional** tool parser plugin used to register user defined tool parsers into vllm, the registered tool parser name can be specified in `--tool-call-parser`. +* `--chat-template` -- **optional** for auto tool choice. the path to the chat template which handles `tool`-role messages and `assistant`-role messages +that contain previously generated tool calls. Hermes, Mistral and Llama models have tool-compatible chat templates in their +`tokenizer_config.json` files, but you can specify a custom template. This argument can be set to `tool_use` if your model has a tool use-specific chat +template configured in the `tokenizer_config.json`. In this case, it will be used per the `transformers` specification. More on this [here](https://huggingface.co/docs/transformers/en/chat_templating#why-do-some-models-have-multiple-templates) +from HuggingFace; and you can find an example of this in a `tokenizer_config.json` [here](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B/blob/main/tokenizer_config.json) + +If your favorite tool-calling model is not supported, please feel free to contribute a parser & tool use chat template! + + +#### Hermes Models (`hermes`) + +All Nous Research Hermes-series models newer than Hermes 2 Pro should be supported. +* `NousResearch/Hermes-2-Pro-*` +* `NousResearch/Hermes-2-Theta-*` +* `NousResearch/Hermes-3-*` + + +_Note that the Hermes 2 **Theta** models are known to have degraded tool call quality & capabilities due to the merge +step in their creation_. + +Flags: `--tool-call-parser hermes` + + +#### Mistral Models (`mistral`) + +Supported models: +* `mistralai/Mistral-7B-Instruct-v0.3` (confirmed) +* Additional mistral function-calling models are compatible as well. + +Known issues: +1. Mistral 7B struggles to generate parallel tool calls correctly. +2. 
Mistral's `tokenizer_config.json` chat template requires tool call IDs that are exactly 9 digits, which is +much shorter than what vLLM generates. Since an exception is thrown when this condition +is not met, the following additional chat templates are provided: + +* `examples/tool_chat_template_mistral.jinja` - this is the "official" Mistral chat template, but tweaked so that +it works with vLLM's tool call IDs (provided `tool_call_id` fields are truncated to the last 9 digits) +* `examples/tool_chat_template_mistral_parallel.jinja` - this is a "better" version that adds a tool-use system prompt +when tools are provided, that results in much better reliability when working with parallel tool calling. + + +Recommended flags: `--tool-call-parser mistral --chat-template examples/tool_chat_template_mistral_parallel.jinja` + + +#### Llama Models (`llama3_json`) + +Supported models: +* `meta-llama/Meta-Llama-3.1-8B-Instruct` +* `meta-llama/Meta-Llama-3.1-70B-Instruct` +* `meta-llama/Meta-Llama-3.1-405B-Instruct` +* `meta-llama/Meta-Llama-3.1-405B-Instruct-FP8` + +The tool calling that is supported is the [JSON based tool calling](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#json-based-tool-calling). For [pythonic tool calling](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/text_prompt_format.md#zero-shot-function-calling) in Llama-3.2 models, see the `pythonic` tool parser below. +Other tool calling formats like the built in python tool calling or custom tool calling are not supported. + +Known issues: +1. Parallel tool calls are not supported. +2. The model can generate parameters with a wrong format, such as generating + an array serialized as string instead of an array. + +The `tool_chat_template_llama3_json.jinja` file contains the "official" Llama chat template, but tweaked so that +it works better with vLLM. + +Recommended flags: `--tool-call-parser llama3_json --chat-template examples/tool_chat_template_llama3_json.jinja` + +#### IBM Granite + +Supported models: +* `ibm-granite/granite-3.0-8b-instruct` + +Recommended flags: `--tool-call-parser granite --chat-template examples/tool_chat_template_granite.jinja` + +`examples/tool_chat_template_granite.jinja`: this is a modified chat template from the original on Huggingface. Parallel function calls are supported. + +* `ibm-granite/granite-20b-functioncalling` + +Recommended flags: `--tool-call-parser granite-20b-fc --chat-template examples/tool_chat_template_granite_20b_fc.jinja` + +`examples/tool_chat_template_granite_20b_fc.jinja`: this is a modified chat template from the original on Huggingface, which is not vLLM compatible. It blends function description elements from the Hermes template and follows the same system prompt as "Response Generation" mode from [the paper](https://arxiv.org/abs/2407.00121). Parallel function calls are supported. + + +#### InternLM Models (`internlm`) + +Supported models: +* `internlm/internlm2_5-7b-chat` (confirmed) +* Additional internlm2.5 function-calling models are compatible as well + +Known issues: +* Although this implementation also supports InternLM2, the tool call results are not stable when testing with the `internlm/internlm2-chat-7b` model. + +Recommended flags: `--tool-call-parser internlm --chat-template examples/tool_chat_template_internlm2_tool.jinja` + + +#### Jamba Models (`jamba`) +AI21's Jamba-1.5 models are supported. 
+* `ai21labs/AI21-Jamba-1.5-Mini` +* `ai21labs/AI21-Jamba-1.5-Large` + + +Flags: `--tool-call-parser jamba` + + +#### Models with Pythonic Tool Calls (`pythonic`) + +A growing number of models output a python list to represent tool calls instead of using JSON. This has the advantage of inherently supporting parallel tool calls and removing ambiguity around the JSON schema required for tool calls. The `pythonic` tool parser can support such models. + +As a concrete example, these models may look up the weather in San Francisco and Seattle by generating: +```python +[get_weather(city='San Francisco', metric='celsius'), get_weather(city='Seattle', metric='celsius')] +``` + +Limitations: +* The model must not generate both text and tool calls in the same generation. This may not be hard to change for a specific model, but the community currently lacks consensus on which tokens to emit when starting and ending tool calls. (In particular, the Llama 3.2 models emit no such tokens.) +* Llama's smaller models struggle to use tools effectively. + +Example supported models: +* `meta-llama/Llama-3.2-1B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`) +* `meta-llama/Llama-3.2-3B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`) +* `Team-ACE/ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`) +* `fixie-ai/ultravox-v0_4-ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`) + +Flags: `--tool-call-parser pythonic --chat-template {see_above}` + +--- +**WARNING** +Llama's smaller models frequently fail to emit tool calls in the correct format. Your mileage may vary. + +--- + + +### How to write a tool parser plugin + +A tool parser plugin is a Python file containing one or more ToolParser implementations. You can write a ToolParser similar to the `Hermes2ProToolParser` in vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py. + +Here is a summary of a plugin file: + +```python + +# import the required packages + +# define a tool parser and register it to vllm +# the name list in register_module can be used +# in --tool-call-parser. you can define as many +# tool parsers as you want here. +@ToolParserManager.register_module(["example"]) +class ExampleToolParser(ToolParser): + def __init__(self, tokenizer: AnyTokenizer): + super().__init__(tokenizer) + + # adjust request. e.g.: set skip special tokens + # to False for tool call output. + def adjust_request( + self, request: ChatCompletionRequest) -> ChatCompletionRequest: + return request + + # implement the tool call parse for stream call + def extract_tool_calls_streaming( + self, + previous_text: str, + current_text: str, + delta_text: str, + previous_token_ids: Sequence[int], + current_token_ids: Sequence[int], + delta_token_ids: Sequence[int], + request: ChatCompletionRequest, + ) -> Union[DeltaMessage, None]: + return delta + + # implement the tool parse for non-stream call + def extract_tool_calls( + self, + model_output: str, + request: ChatCompletionRequest, + ) -> ExtractedToolCallInformation: + return ExtractedToolCallInformation(tools_called=False, + tool_calls=[], + content=text) + + +``` + +Then you can use this plugin in the command line like this. 
+``` + --enable-auto-tool-choice \ + --tool-parser-plugin + --tool-call-parser example \ + --chat-template \ +``` + diff --git a/docs/source/usage/spec_decode.rst b/docs/source/usage/spec_decode.rst index f1f1917f974bb..67e8ede7654b7 100644 --- a/docs/source/usage/spec_decode.rst +++ b/docs/source/usage/spec_decode.rst @@ -8,9 +8,6 @@ Speculative decoding not usually yield inter-token latency reductions for all prompt datasets or sampling parameters. The work to optimize it is ongoing and can be followed in `this issue. `_ -.. warning:: - Currently, speculative decoding in vLLM is not compatible with pipeline parallelism. - This document shows how to use `Speculative Decoding `_ with vLLM. Speculative decoding is a technique which improves inter-token latency in memory-bound LLM inference. diff --git a/docs/source/usage/tool_calling.md b/docs/source/usage/tool_calling.md deleted file mode 100644 index f8be023307b0c..0000000000000 --- a/docs/source/usage/tool_calling.md +++ /dev/null @@ -1,287 +0,0 @@ -# Tool Calling - -vLLM currently supports named function calling, as well as the `auto` and `none` options for the `tool_choice` field in the chat completion API. The `tool_choice` option `required` is **not yet supported** but on the roadmap. - -## Quickstart - -Start the server with tool calling enabled. This example uses Meta's Llama 3.1 8B model, so we need to use the llama3 tool calling chat template from the vLLM examples directory: - -```bash -vllm serve meta-llama/Llama-3.1-8B-Instruct \ - --enable-auto-tool-choice \ - --tool-call-parser llama3_json \ - --chat-template examples/tool_chat_template_llama3_json.jinja -``` - -Next, make a request to the model that should result in it using the available tools: - -```python -from openai import OpenAI -import json - -client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy") - -def get_weather(location: str, unit: str): - return f"Getting the weather for {location} in {unit}..." -tool_functions = {"get_weather": get_weather} - -tools = [{ - "type": "function", - "function": { - "name": "get_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"}, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]} - }, - "required": ["location", "unit"] - } - } -}] - -response = client.chat.completions.create( - model=client.models.list().data[0].id, - messages=[{"role": "user", "content": "What's the weather like in San Francisco?"}], - tools=tools, - tool_choice="auto" -) - -tool_call = response.choices[0].message.tool_calls[0].function -print(f"Function called: {tool_call.name}") -print(f"Arguments: {tool_call.arguments}") -print(f"Result: {get_weather(**json.loads(tool_call.arguments))}") -``` - -Example output: -``` -Function called: get_weather -Arguments: {"location": "San Francisco, CA", "unit": "fahrenheit"} -Result: Getting the weather for San Francisco, CA in fahrenheit... -``` - -This example demonstrates: -- Setting up the server with tool calling enabled -- Defining an actual function to handle tool calls -- Making a request with `tool_choice="auto"` -- Handling the structured response and executing the corresponding function - -You can also specify a particular function using named function calling by setting `tool_choice={"type": "function", "function": {"name": "get_weather"}}`. 
Note that this will use the guided decoding backend - so the first time this is used, there will be several seconds of latency (or more) as the FSM is compiled for the first time before it is cached for subsequent requests. - -Remember that it's the callers responsibility to: -1. Define appropriate tools in the request -2. Include relevant context in the chat messages -3. Handle the tool calls in your application logic - -For more advanced usage, including parallel tool calls and different model-specific parsers, see the sections below. - -## Named Function Calling -vLLM supports named function calling in the chat completion API by default. It does so using Outlines through guided decoding, so this is -enabled by default, and will work with any supported model. You are guaranteed a validly-parsable function call - not a -high-quality one. - -vLLM will use guided decoding to ensure the response matches the tool parameter object defined by the JSON schema in the `tools` parameter. -For best results, we recommend ensuring that the expected output format / schema is specified in the prompt to ensure that the model's intended generation is aligned with the schema that it's being forced to generate by the guided decoding backend. - -To use a named function, you need to define the functions in the `tools` parameter of the chat completion request, and -specify the `name` of one of the tools in the `tool_choice` parameter of the chat completion request. - - -## Automatic Function Calling - -To enable this feature, you should set the following flags: -* `--enable-auto-tool-choice` -- **mandatory** Auto tool choice. tells vLLM that you want to enable the model to generate its own tool calls when it -deems appropriate. -* `--tool-call-parser` -- select the tool parser to use (listed below). Additional tool parsers -will continue to be added in the future, and also can register your own tool parsers in the `--tool-parser-plugin`. -* `--tool-parser-plugin` -- **optional** tool parser plugin used to register user defined tool parsers into vllm, the registered tool parser name can be specified in `--tool-call-parser`. -* `--chat-template` -- **optional** for auto tool choice. the path to the chat template which handles `tool`-role messages and `assistant`-role messages -that contain previously generated tool calls. Hermes, Mistral and Llama models have tool-compatible chat templates in their -`tokenizer_config.json` files, but you can specify a custom template. This argument can be set to `tool_use` if your model has a tool use-specific chat -template configured in the `tokenizer_config.json`. In this case, it will be used per the `transformers` specification. More on this [here](https://huggingface.co/docs/transformers/en/chat_templating#why-do-some-models-have-multiple-templates) -from HuggingFace; and you can find an example of this in a `tokenizer_config.json` [here](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B/blob/main/tokenizer_config.json) - -If your favorite tool-calling model is not supported, please feel free to contribute a parser & tool use chat template! - - -### Hermes Models (`hermes`) - -All Nous Research Hermes-series models newer than Hermes 2 Pro should be supported. -* `NousResearch/Hermes-2-Pro-*` -* `NousResearch/Hermes-2-Theta-*` -* `NousResearch/Hermes-3-*` - - -_Note that the Hermes 2 **Theta** models are known to have degraded tool call quality & capabilities due to the merge -step in their creation_. 
- -Flags: `--tool-call-parser hermes` - - -### Mistral Models (`mistral`) - -Supported models: -* `mistralai/Mistral-7B-Instruct-v0.3` (confirmed) -* Additional mistral function-calling models are compatible as well. - -Known issues: -1. Mistral 7B struggles to generate parallel tool calls correctly. -2. Mistral's `tokenizer_config.json` chat template requires tool call IDs that are exactly 9 digits, which is -much shorter than what vLLM generates. Since an exception is thrown when this condition -is not met, the following additional chat templates are provided: - -* `examples/tool_chat_template_mistral.jinja` - this is the "official" Mistral chat template, but tweaked so that -it works with vLLM's tool call IDs (provided `tool_call_id` fields are truncated to the last 9 digits) -* `examples/tool_chat_template_mistral_parallel.jinja` - this is a "better" version that adds a tool-use system prompt -when tools are provided, that results in much better reliability when working with parallel tool calling. - - -Recommended flags: `--tool-call-parser mistral --chat-template examples/tool_chat_template_mistral_parallel.jinja` - - -### Llama Models (`llama3_json`) - -Supported models: -* `meta-llama/Meta-Llama-3.1-8B-Instruct` -* `meta-llama/Meta-Llama-3.1-70B-Instruct` -* `meta-llama/Meta-Llama-3.1-405B-Instruct` -* `meta-llama/Meta-Llama-3.1-405B-Instruct-FP8` - -The tool calling that is supported is the [JSON based tool calling](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#json-based-tool-calling). For [pythonic tool calling](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/text_prompt_format.md#zero-shot-function-calling) in Llama-3.2 models, see the `pythonic` tool parser below. -Other tool calling formats like the built in python tool calling or custom tool calling are not supported. - -Known issues: -1. Parallel tool calls are not supported. -2. The model can generate parameters with a wrong format, such as generating - an array serialized as string instead of an array. - -The `tool_chat_template_llama3_json.jinja` file contains the "official" Llama chat template, but tweaked so that -it works better with vLLM. - -Recommended flags: `--tool-call-parser llama3_json --chat-template examples/tool_chat_template_llama3_json.jinja` - -#### IBM Granite - -Supported models: -* `ibm-granite/granite-3.0-8b-instruct` - -Recommended flags: `--tool-call-parser granite --chat-template examples/tool_chat_template_granite.jinja` - -`examples/tool_chat_template_granite.jinja`: this is a modified chat template from the original on Huggingface. Parallel function calls are supported. - -* `ibm-granite/granite-20b-functioncalling` - -Recommended flags: `--tool-call-parser granite-20b-fc --chat-template examples/tool_chat_template_granite_20b_fc.jinja` - -`examples/tool_chat_template_granite_20b_fc.jinja`: this is a modified chat template from the original on Huggingface, which is not vLLM compatible. It blends function description elements from the Hermes template and follows the same system prompt as "Response Generation" mode from [the paper](https://arxiv.org/abs/2407.00121). Parallel function calls are supported. - - -### InternLM Models (`internlm`) - -Supported models: -* `internlm/internlm2_5-7b-chat` (confirmed) -* Additional internlm2.5 function-calling models are compatible as well - -Known issues: -* Although this implementation also supports InternLM2, the tool call results are not stable when testing with the `internlm/internlm2-chat-7b` model. 
- -Recommended flags: `--tool-call-parser internlm --chat-template examples/tool_chat_template_internlm2_tool.jinja` - - -### Jamba Models (`jamba`) -AI21's Jamba-1.5 models are supported. -* `ai21labs/AI21-Jamba-1.5-Mini` -* `ai21labs/AI21-Jamba-1.5-Large` - - -Flags: `--tool-call-parser jamba` - - -### Models with Pythonic Tool Calls (`pythonic`) - -A growing number of models output a python list to represent tool calls instead of using JSON. This has the advantage of inherently supporting parallel tool calls and removing ambiguity around the JSON schema required for tool calls. The `pythonic` tool parser can support such models. - -As a concrete example, these models may look up the weather in San Francisco and Seattle by generating: -```python -[get_weather(city='San Francisco', metric='celsius'), get_weather(city='Seattle', metric='celsius')] -``` - -Limitations: -* The model must not generate both text and tool calls in the same generation. This may not be hard to change for a specific model, but the community currently lacks consensus on which tokens to emit when starting and ending tool calls. (In particular, the Llama 3.2 models emit no such tokens.) -* Llama's smaller models struggle to use tools effectively. - -Example supported models: -* `meta-llama/Llama-3.2-1B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`) -* `meta-llama/Llama-3.2-3B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`) -* `Team-ACE/ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`) -* `fixie-ai/ultravox-v0_4-ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`) - -Flags: `--tool-call-parser pythonic --chat-template {see_above}` - ---- -**WARNING** -Llama's smaller models frequently fail to emit tool calls in the correct format. Your mileage may vary. - ---- - - -## How to write a tool parser plugin - -A tool parser plugin is a Python file containing one or more ToolParser implementations. You can write a ToolParser similar to the `Hermes2ProToolParser` in vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py. - -Here is a summary of a plugin file: - -```python - -# import the required packages - -# define a tool parser and register it to vllm -# the name list in register_module can be used -# in --tool-call-parser. you can define as many -# tool parsers as you want here. -@ToolParserManager.register_module(["example"]) -class ExampleToolParser(ToolParser): - def __init__(self, tokenizer: AnyTokenizer): - super().__init__(tokenizer) - - # adjust request. e.g.: set skip special tokens - # to False for tool call output. - def adjust_request( - self, request: ChatCompletionRequest) -> ChatCompletionRequest: - return request - - # implement the tool call parse for stream call - def extract_tool_calls_streaming( - self, - previous_text: str, - current_text: str, - delta_text: str, - previous_token_ids: Sequence[int], - current_token_ids: Sequence[int], - delta_token_ids: Sequence[int], - request: ChatCompletionRequest, - ) -> Union[DeltaMessage, None]: - return delta - - # implement the tool parse for non-stream call - def extract_tool_calls( - self, - model_output: str, - request: ChatCompletionRequest, - ) -> ExtractedToolCallInformation: - return ExtractedToolCallInformation(tools_called=False, - tool_calls=[], - content=text) - - -``` - -Then you can use this plugin in the command line like this. 
-``` - --enable-auto-tool-choice \ - --tool-parser-plugin - --tool-call-parser example \ - --chat-template \ -``` - diff --git a/examples/chart-helm/.helmignore b/examples/chart-helm/.helmignore deleted file mode 100644 index 2d1303b784cb8..0000000000000 --- a/examples/chart-helm/.helmignore +++ /dev/null @@ -1,6 +0,0 @@ -*.png -.git/ -ct.yaml -lintconf.yaml -values.schema.json -/workflows \ No newline at end of file diff --git a/examples/chart-helm/Chart.yaml b/examples/chart-helm/Chart.yaml deleted file mode 100644 index fb0f06f6d2701..0000000000000 --- a/examples/chart-helm/Chart.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v2 -name: chart-vllm -description: Chart vllm - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 - -maintainers: - - name: mfournioux diff --git a/examples/chart-helm/ct.yaml b/examples/chart-helm/ct.yaml deleted file mode 100644 index d273e118203ad..0000000000000 --- a/examples/chart-helm/ct.yaml +++ /dev/null @@ -1,3 +0,0 @@ -chart-dirs: - - charts -validate-maintainers: false \ No newline at end of file diff --git a/examples/chart-helm/lintconf.yaml b/examples/chart-helm/lintconf.yaml deleted file mode 100644 index c8e8c5d7d9767..0000000000000 --- a/examples/chart-helm/lintconf.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -rules: - braces: - min-spaces-inside: 0 - max-spaces-inside: 0 - min-spaces-inside-empty: -1 - max-spaces-inside-empty: -1 - brackets: - min-spaces-inside: 0 - max-spaces-inside: 0 - min-spaces-inside-empty: -1 - max-spaces-inside-empty: -1 - colons: - max-spaces-before: 0 - max-spaces-after: 1 - commas: - max-spaces-before: 0 - min-spaces-after: 1 - max-spaces-after: 1 - comments: - require-starting-space: true - min-spaces-from-content: 2 - document-end: disable - document-start: disable # No --- to start a file - empty-lines: - max: 2 - max-start: 0 - max-end: 0 - hyphens: - max-spaces-after: 1 - indentation: - spaces: consistent - indent-sequences: whatever # - list indentation will handle both indentation and without - check-multi-line-strings: false - key-duplicates: enable - line-length: disable # Lines can be any length - new-line-at-end-of-file: disable - new-lines: - type: unix - trailing-spaces: enable - truthy: - level: warning \ No newline at end of file diff --git a/examples/chart-helm/templates/_helpers.tpl b/examples/chart-helm/templates/_helpers.tpl deleted file mode 100644 index a9690bad3c945..0000000000000 --- a/examples/chart-helm/templates/_helpers.tpl +++ /dev/null @@ -1,164 +0,0 @@ -{{/* -Define ports for the pods -*/}} -{{- define "chart.container-port" -}} -{{- default "8000" .Values.containerPort }} -{{- end }} - -{{/* -Define service name -*/}} -{{- define "chart.service-name" -}} -{{- if .Values.serviceName }} -{{- .Values.serviceName | lower | trim }} -{{- else }} -"{{ .Release.Name }}-service" -{{- end }} -{{- end }} - -{{/* 
-Define service port -*/}} -{{- define "chart.service-port" -}} -{{- if .Values.servicePort }} -{{- .Values.servicePort }} -{{- else }} -{{- include "chart.container-port" . }} -{{- end }} -{{- end }} - -{{/* -Define service port name -*/}} -{{- define "chart.service-port-name" -}} -"service-port" -{{- end }} - -{{/* -Define container port name -*/}} -{{- define "chart.container-port-name" -}} -"container-port" -{{- end }} - -{{/* -Define deployment strategy -*/}} -{{- define "chart.strategy" -}} -strategy: -{{- if not .Values.deploymentStrategy }} - rollingUpdate: - maxSurge: 100% - maxUnavailable: 0 -{{- else }} -{{ toYaml .Values.deploymentStrategy | indent 2 }} -{{- end }} -{{- end }} - -{{/* -Define additional ports -*/}} -{{- define "chart.extraPorts" }} -{{- with .Values.extraPorts }} -{{ toYaml . }} -{{- end }} -{{- end }} - -{{/* -Define chart external ConfigMaps and Secrets -*/}} -{{- define "chart.externalConfigs" -}} -{{- with .Values.externalConfigs -}} -{{ toYaml . }} -{{- end }} -{{- end }} - - -{{/* -Define liveness et readiness probes -*/}} -{{- define "chart.probes" -}} -{{- if .Values.readinessProbe }} -readinessProbe: -{{- with .Values.readinessProbe }} -{{- toYaml . | nindent 2 }} -{{- end }} -{{- end }} -{{- if .Values.livenessProbe }} -livenessProbe: -{{- with .Values.livenessProbe }} -{{- toYaml . | nindent 2 }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Define resources -*/}} -{{- define "chart.resources" -}} -requests: - memory: {{ required "Value 'resources.requests.memory' must be defined !" .Values.resources.requests.memory | quote }} - cpu: {{ required "Value 'resources.requests.cpu' must be defined !" .Values.resources.requests.cpu | quote }} - {{- if and (gt (int (index .Values.resources.requests "nvidia.com/gpu")) 0) (gt (int (index .Values.resources.limits "nvidia.com/gpu")) 0) }} - nvidia.com/gpu: {{ required "Value 'resources.requests.nvidia.com/gpu' must be defined !" (index .Values.resources.requests "nvidia.com/gpu") | quote }} - {{- end }} -limits: - memory: {{ required "Value 'resources.limits.memory' must be defined !" .Values.resources.limits.memory | quote }} - cpu: {{ required "Value 'resources.limits.cpu' must be defined !" .Values.resources.limits.cpu | quote }} - {{- if and (gt (int (index .Values.resources.requests "nvidia.com/gpu")) 0) (gt (int (index .Values.resources.limits "nvidia.com/gpu")) 0) }} - nvidia.com/gpu: {{ required "Value 'resources.limits.nvidia.com/gpu' must be defined !" (index .Values.resources.limits "nvidia.com/gpu") | quote }} - {{- end }} -{{- end }} - - -{{/* -Define User used for the main container -*/}} -{{- define "chart.user" }} -{{- if .Values.image.runAsUser }} -runAsUser: -{{- with .Values.runAsUser }} -{{- toYaml . 
| nindent 2 }} -{{- end }} -{{- end }} -{{- end }} - -{{- define "chart.extraInitImage" -}} -"amazon/aws-cli:2.6.4" -{{- end }} - -{{- define "chart.extraInitEnv" -}} -- name: S3_ENDPOINT_URL - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-secrets - key: s3endpoint -- name: S3_BUCKET_NAME - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-secrets - key: s3bucketname -- name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-secrets - key: s3accesskeyid -- name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-secrets - key: s3accesskey -- name: S3_PATH - value: "{{ .Values.extraInit.s3modelpath }}" -- name: AWS_EC2_METADATA_DISABLED - value: "{{ .Values.extraInit.awsEc2MetadataDisabled }}" -{{- end }} - -{{/* - Define chart labels -*/}} -{{- define "chart.labels" -}} -{{- with .Values.labels -}} -{{ toYaml . }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/configmap.yaml b/examples/chart-helm/templates/configmap.yaml deleted file mode 100644 index cc5d03782f878..0000000000000 --- a/examples/chart-helm/templates/configmap.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.configs -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: "{{ .Release.Name }}-configs" - namespace: {{ .Release.Namespace }} -data: - {{- with .Values.configs }} - {{- toYaml . | nindent 2 }} - {{- end }} -{{- end -}} \ No newline at end of file diff --git a/examples/chart-helm/templates/custom-objects.yaml b/examples/chart-helm/templates/custom-objects.yaml deleted file mode 100644 index 8a65ffd0e552d..0000000000000 --- a/examples/chart-helm/templates/custom-objects.yaml +++ /dev/null @@ -1,6 +0,0 @@ -{{- if .Values.customObjects }} -{{- range .Values.customObjects }} -{{- tpl (. | toYaml) $ }} ---- -{{- end }} -{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/deployment.yaml b/examples/chart-helm/templates/deployment.yaml deleted file mode 100644 index 536983b587be2..0000000000000 --- a/examples/chart-helm/templates/deployment.yaml +++ /dev/null @@ -1,122 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "{{ .Release.Name }}-deployment-vllm" - namespace: {{ .Release.Namespace }} - labels: - {{- include "chart.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - {{- include "chart.strategy" . | nindent 2 }} - selector: - matchLabels: - environment: "test" - release: "test" - progressDeadlineSeconds: 1200 - template: - metadata: - labels: - environment: "test" - release: "test" - spec: - containers: - - name: "vllm" - image: "{{ required "Required value 'image.repository' must be defined !" .Values.image.repository }}:{{ required "Required value 'image.tag' must be defined !" .Values.image.tag }}" - {{- if .Values.image.command }} - command : - {{- with .Values.image.command }} - {{- toYaml . | nindent 10 }} - {{- end }} - {{- end }} - securityContext: - {{- if .Values.image.securityContext }} - {{- with .Values.image.securityContext }} - {{- toYaml . | nindent 12 }} - {{- end }} - {{- else }} - runAsNonRoot: false - {{- include "chart.user" . | indent 12 }} - {{- end }} - imagePullPolicy: IfNotPresent - {{- if .Values.image.env }} - env : - {{- with .Values.image.env }} - {{- toYaml . 
| nindent 10 }} - {{- end }} - {{- else }} - env: [] - {{- end }} - {{- if or .Values.externalConfigs .Values.configs .Values.secrets }} - envFrom: - {{- if .Values.configs }} - - configMapRef: - name: "{{ .Release.Name }}-configs" - {{- end }} - {{- if .Values.secrets}} - - secretRef: - name: "{{ .Release.Name }}-secrets" - {{- end }} - {{- include "chart.externalConfigs" . | nindent 12 }} - {{- end }} - ports: - - name: {{ include "chart.container-port-name" . }} - containerPort: {{ include "chart.container-port" . }} - {{- include "chart.extraPorts" . | nindent 12 }} - {{- include "chart.probes" . | indent 10 }} - resources: {{- include "chart.resources" . | nindent 12 }} - volumeMounts: - - name: {{ .Release.Name }}-storage - mountPath: /data - - {{- with .Values.extraContainers }} - {{ toYaml . | nindent 8 }} - {{- end }} - - {{- if .Values.extraInit }} - initContainers: - - name: wait-download-model - image: {{ include "chart.extraInitImage" . }} - command: - - /bin/bash - args: - - -eucx - - while aws --endpoint-url $S3_ENDPOINT_URL s3 sync --dryrun s3://$S3_BUCKET_NAME/$S3_PATH /data | grep -q download; do sleep 10; done - env: {{- include "chart.extraInitEnv" . | nindent 10 }} - resources: - requests: - cpu: 200m - memory: 1Gi - limits: - cpu: 500m - memory: 2Gi - volumeMounts: - - name: {{ .Release.Name }}-storage - mountPath: /data - {{- end }} - volumes: - - name: {{ .Release.Name }}-storage - persistentVolumeClaim: - claimName: {{ .Release.Name }}-storage-claim - - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- if and (gt (int (index .Values.resources.requests "nvidia.com/gpu")) 0) (gt (int (index .Values.resources.limits "nvidia.com/gpu")) 0) }} - runtimeClassName: nvidia - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: nvidia.com/gpu.product - operator: In - {{- with .Values.gpuModels }} - values: - {{- toYaml . 
| nindent 20 }} - {{- end }} - {{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/hpa.yaml b/examples/chart-helm/templates/hpa.yaml deleted file mode 100644 index 5ca94c8213541..0000000000000 --- a/examples/chart-helm/templates/hpa.yaml +++ /dev/null @@ -1,31 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: "{{ .Release.Name }}-hpa" - namespace: {{ .Release.Namespace }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: vllm - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/job.yaml b/examples/chart-helm/templates/job.yaml deleted file mode 100644 index f9ea3541e78d2..0000000000000 --- a/examples/chart-helm/templates/job.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{{- if .Values.extraInit }} -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ .Release.Name }}-init-vllm" - namespace: {{ .Release.Namespace }} -spec: - ttlSecondsAfterFinished: 100 - template: - metadata: - name: init-vllm - spec: - containers: - - name: job-download-model - image: {{ include "chart.extraInitImage" . }} - command: - - /bin/bash - args: - - -eucx - - aws --endpoint-url $S3_ENDPOINT_URL s3 sync s3://$S3_BUCKET_NAME/$S3_PATH /data - env: {{- include "chart.extraInitEnv" . 
| nindent 8 }} - volumeMounts: - - name: {{ .Release.Name }}-storage - mountPath: /data - resources: - requests: - cpu: 200m - memory: 1Gi - limits: - cpu: 500m - memory: 2Gi - restartPolicy: OnFailure - volumes: - - name: {{ .Release.Name }}-storage - persistentVolumeClaim: - claimName: "{{ .Release.Name }}-storage-claim" -{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/poddisruptionbudget.yaml b/examples/chart-helm/templates/poddisruptionbudget.yaml deleted file mode 100644 index 512bac727da87..0000000000000 --- a/examples/chart-helm/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: "{{ .Release.Name }}-pdb" - namespace: {{ .Release.Namespace }} -spec: - maxUnavailable: {{ default 1 .Values.maxUnavailablePodDisruptionBudget }} \ No newline at end of file diff --git a/examples/chart-helm/templates/pvc.yaml b/examples/chart-helm/templates/pvc.yaml deleted file mode 100644 index e8d203a7a5ace..0000000000000 --- a/examples/chart-helm/templates/pvc.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.extraInit }} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: "{{ .Release.Name }}-storage-claim" - namespace: {{ .Release.Namespace }} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.extraInit.pvcStorage }} -{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/secrets.yaml b/examples/chart-helm/templates/secrets.yaml deleted file mode 100644 index 4e88e747b616a..0000000000000 --- a/examples/chart-helm/templates/secrets.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: "{{ .Release.Name }}-secrets" - namespace: {{ .Release.Namespace }} -type: Opaque -data: - {{- range $key, $val := .Values.secrets }} - {{ $key }}: {{ $val | b64enc | quote }} - {{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/service.yaml b/examples/chart-helm/templates/service.yaml deleted file mode 100644 index 12d0f68b03a35..0000000000000 --- a/examples/chart-helm/templates/service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: "{{ .Release.Name }}-service" - namespace: {{ .Release.Namespace }} -spec: - type: ClusterIP - ports: - - name: {{ include "chart.service-port-name" . }} - port: {{ include "chart.service-port" . }} - targetPort: {{ include "chart.container-port-name" . }} - protocol: TCP - selector: - {{- include "chart.labels" . 
| nindent 4 }} \ No newline at end of file diff --git a/examples/chart-helm/values.schema.json b/examples/chart-helm/values.schema.json deleted file mode 100644 index 812d54bde1397..0000000000000 --- a/examples/chart-helm/values.schema.json +++ /dev/null @@ -1,265 +0,0 @@ -{ - "$schema": "http://json-schema.org/schema#", - "type": "object", - "properties": { - "image": { - "type": "object", - "properties": { - "repository": { - "type": "string" - }, - "tag": { - "type": "string" - }, - "command": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "required": [ - "command", - "repository", - "tag" - ] - }, - "containerPort": { - "type": "integer" - }, - "serviceName": { - "type": "null" - }, - "servicePort": { - "type": "integer" - }, - "extraPorts": { - "type": "array" - }, - "replicaCount": { - "type": "integer" - }, - "deploymentStrategy": { - "type": "object" - }, - "resources": { - "type": "object", - "properties": { - "requests": { - "type": "object", - "properties": { - "cpu": { - "type": "integer" - }, - "memory": { - "type": "string" - }, - "nvidia.com/gpu": { - "type": "integer" - } - }, - "required": [ - "cpu", - "memory", - "nvidia.com/gpu" - ] - }, - "limits": { - "type": "object", - "properties": { - "cpu": { - "type": "integer" - }, - "memory": { - "type": "string" - }, - "nvidia.com/gpu": { - "type": "integer" - } - }, - "required": [ - "cpu", - "memory", - "nvidia.com/gpu" - ] - } - }, - "required": [ - "limits", - "requests" - ] - }, - "gpuModels": { - "type": "array", - "items": { - "type": "string" - } - }, - "autoscaling": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "minReplicas": { - "type": "integer" - }, - "maxReplicas": { - "type": "integer" - }, - "targetCPUUtilizationPercentage": { - "type": "integer" - } - }, - "required": [ - "enabled", - "maxReplicas", - "minReplicas", - "targetCPUUtilizationPercentage" - ] - }, - "configs": { - "type": "object" - }, - "secrets": { - "type": "object" - }, - "externalConfigs": { - "type": "array" - }, - "customObjects": { - "type": "array" - }, - "maxUnavailablePodDisruptionBudget": { - "type": "string" - }, - "extraInit": { - "type": "object", - "properties": { - "s3modelpath": { - "type": "string" - }, - "pvcStorage": { - "type": "string" - }, - "awsEc2MetadataDisabled": { - "type": "boolean" - } - }, - "required": [ - "pvcStorage", - "s3modelpath", - "awsEc2MetadataDisabled" - ] - }, - "extraContainers": { - "type": "array" - }, - "readinessProbe": { - "type": "object", - "properties": { - "initialDelaySeconds": { - "type": "integer" - }, - "periodSeconds": { - "type": "integer" - }, - "failureThreshold": { - "type": "integer" - }, - "httpGet": { - "type": "object", - "properties": { - "path": { - "type": "string" - }, - "port": { - "type": "integer" - } - }, - "required": [ - "path", - "port" - ] - } - }, - "required": [ - "failureThreshold", - "httpGet", - "initialDelaySeconds", - "periodSeconds" - ] - }, - "livenessProbe": { - "type": "object", - "properties": { - "initialDelaySeconds": { - "type": "integer" - }, - "failureThreshold": { - "type": "integer" - }, - "periodSeconds": { - "type": "integer" - }, - "httpGet": { - "type": "object", - "properties": { - "path": { - "type": "string" - }, - "port": { - "type": "integer" - } - }, - "required": [ - "path", - "port" - ] - } - }, - "required": [ - "failureThreshold", - "httpGet", - "initialDelaySeconds", - "periodSeconds" - ] - }, - "labels": { - "type": "object", - "properties": { - "environment": { - "type": 
"string" - }, - "release": { - "type": "string" - } - }, - "required": [ - "environment", - "release" - ] - } - }, - "required": [ - "autoscaling", - "configs", - "containerPort", - "customObjects", - "deploymentStrategy", - "externalConfigs", - "extraContainers", - "extraInit", - "extraPorts", - "gpuModels", - "image", - "labels", - "livenessProbe", - "maxUnavailablePodDisruptionBudget", - "readinessProbe", - "replicaCount", - "resources", - "secrets", - "servicePort" - ] -} \ No newline at end of file diff --git a/examples/chart-helm/values.yaml b/examples/chart-helm/values.yaml deleted file mode 100644 index 9c48e7d061bf7..0000000000000 --- a/examples/chart-helm/values.yaml +++ /dev/null @@ -1,119 +0,0 @@ -# -- Default values for chart vllm -# -- Declare variables to be passed into your templates. - -# -- Image configuration -image: - # -- Image repository - repository: "vllm/vllm-openai" - # -- Image tag - tag: "latest" - # -- Container launch command - command: ["vllm", "serve", "/data/", "--served-model-name", "opt-125m", "--dtype", "bfloat16", "--host", "0.0.0.0", "--port", "8000"] - -# -- Container port -containerPort: 8000 -# -- Service name -serviceName: -# -- Service port -servicePort: 80 -# -- Additional ports configuration -extraPorts: [] - -# -- Number of replicas -replicaCount: 1 - -# -- Deployment strategy configuration -deploymentStrategy: {} - -# -- Resource configuration -resources: - requests: - # -- Number of CPUs - cpu: 4 - # -- CPU memory configuration - memory: 16Gi - # -- Number of gpus used - nvidia.com/gpu: 1 - limits: - # -- Number of CPUs - cpu: 4 - # -- CPU memory configuration - memory: 16Gi - # -- Number of gpus used - nvidia.com/gpu: 1 - -# -- Type of gpu used -gpuModels: - - "TYPE_GPU_USED" - -# -- Autoscaling configuration -autoscaling: - # -- Enable autoscaling - enabled: false - # -- Minimum replicas - minReplicas: 1 - # -- Maximum replicas - maxReplicas: 100 - # -- Target CPU utilization for autoscaling - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - -# -- Configmap -configs: {} - -# -- Secrets configuration -secrets: {} - -# -- External configuration -externalConfigs: [] - -# -- Custom Objects configuration -customObjects: [] - -# -- Disruption Budget Configuration -maxUnavailablePodDisruptionBudget: "" - -# -- Additional configuration for the init container -extraInit: - # -- Path of the model on the s3 which hosts model weights and config files - s3modelpath: "relative_s3_model_path/opt-125m" - # -- Storage size of the s3 - pvcStorage: "1Gi" - awsEc2MetadataDisabled: true - -# -- Additional containers configuration -extraContainers: [] - -# -- Readiness probe configuration -readinessProbe: - # -- Number of seconds after the container has started before readiness probe is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the readiness probe - periodSeconds: 5 - # -- Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not ready - failureThreshold: 3 - # -- Configuration of the Kubelet http request on the server - httpGet: - # -- Path to access on the HTTP server - path: /health - # -- Name or number of the port to access on the container, on which the server is listening - port: 8000 - -# -- Liveness probe configuration -livenessProbe: - # -- Number of seconds after the container has started before liveness probe is initiated - initialDelaySeconds: 15 - # -- Number of times after which if a probe fails in a row, 
Kubernetes considers that the overall check has failed: the container is not alive - failureThreshold: 3 - # -- How often (in seconds) to perform the liveness probe - periodSeconds: 10 - # -- Configuration of the Kubelet http request on the server - httpGet: - # -- Path to access on the HTTP server - path: /health - # -- Name or number of the port to access on the container, on which the server is listening - port: 8000 - -labels: - environment: "test" - release: "test" diff --git a/examples/offline_inference_vision_language.py b/examples/offline_inference_vision_language.py index c6a274ee5894b..f08f22eec164a 100644 --- a/examples/offline_inference_vision_language.py +++ b/examples/offline_inference_vision_language.py @@ -223,7 +223,7 @@ def run_internvl(question: str, modality: str): # Stop tokens for InternVL # models variants may have different stop tokens # please refer to the model card for the correct "stop words": - # https://huggingface.co/OpenGVLab/InternVL2-2B/blob/main/conversation.py + # https://huggingface.co/OpenGVLab/InternVL2-2B#service stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end|>"] stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] return llm, prompt, stop_token_ids @@ -419,22 +419,6 @@ def run_aria(question: str, modality: str): return llm, prompt, stop_token_ids -# Mantis -def run_mantis(question: str, modality: str): - assert modality == "image" - - llama3_template = '<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' # noqa: E501 - prompt = llama3_template.format(f"{question}\n") - - llm = LLM( - model="TIGER-Lab/Mantis-8B-siglip-llama3", - max_model_len=4096, - hf_overrides={"architectures": ["MantisForConditionalGeneration"]}, - ) - stop_token_ids = [128009] - return llm, prompt, stop_token_ids - - model_example_map = { "llava": run_llava, "llava-next": run_llava_next, @@ -457,7 +441,6 @@ def run_mantis(question: str, modality: str): "glm4v": run_glm4v, "idefics3": run_idefics3, "aria": run_aria, - "mantis": run_mantis, } diff --git a/examples/offline_inference_vision_language_multi_image.py b/examples/offline_inference_vision_language_multi_image.py index 928bbef54eab7..788b604cfd4a0 100644 --- a/examples/offline_inference_vision_language_multi_image.py +++ b/examples/offline_inference_vision_language_multi_image.py @@ -165,7 +165,7 @@ def load_internvl(question: str, image_urls: List[str]) -> ModelRequestData: # Stop tokens for InternVL # models variants may have different stop tokens # please refer to the model card for the correct "stop words": - # https://huggingface.co/OpenGVLab/InternVL2-2B/blob/main/conversation.py + # https://huggingface.co/OpenGVLab/InternVL2-2B#service stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end|>"] stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] diff --git a/requirements-common.txt b/requirements-common.txt index 112528880c0ac..72fb020a82c4e 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -19,7 +19,7 @@ prometheus-fastapi-instrumentator >= 7.0.0 tiktoken >= 0.6.0 # Required for DBRX tokenizer lm-format-enforcer >= 0.10.9, < 0.11 outlines >= 0.0.43, < 0.1 -xgrammar >= 0.1.6; platform_machine == "x86_64" +xgrammar >= 0.1.5; platform_machine == "x86_64" typing_extensions >= 4.10 filelock >= 3.16.1 # need to contain https://github.com/tox-dev/filelock/pull/317 partial-json-parser # used for parsing partial JSON outputs diff --git a/requirements-test.in 
b/requirements-test.in index c0b228148ab31..44972866ddc4b 100644 --- a/requirements-test.in +++ b/requirements-test.in @@ -24,6 +24,9 @@ mistral_common[opencv] >= 1.5.0 # required for pixtral test datamodel_code_generator # required for minicpm3 test lm-eval[api]==0.4.4 # required for model evaluation test +# TODO: Add this after fully implementing llava(mantis) +# git+https://github.com/TIGER-AI-Lab/Mantis.git # required for llava(mantis) test + # quantization bitsandbytes>=0.44.0 buildkite-test-collector==0.1.9 diff --git a/requirements-test.txt b/requirements-test.txt index 38a064bca449a..19369254dbe26 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,8 +1,8 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# python3.12 -m piptools compile requirements-test.in -o requirements-test.txt +# pip-compile requirements-test.in # absl-py==2.1.0 # via rouge-score @@ -27,6 +27,10 @@ anyio==4.6.2.post1 # via httpx argcomplete==3.5.1 # via datamodel-code-generator +async-timeout==4.0.3 + # via + # aiohttp + # redis attrs==24.2.0 # via # aiohttp @@ -107,6 +111,10 @@ email-validator==2.2.0 # via pydantic evaluate==0.4.3 # via lm-eval +exceptiongroup==1.2.2 + # via + # anyio + # pytest fastrlock==0.8.2 # via cupy-cuda12x filelock==3.16.1 @@ -157,6 +165,8 @@ idna==3.10 # httpx # requests # yarl +importlib-resources==6.4.5 + # via matplotlib inflect==5.6.2 # via datamodel-code-generator iniconfig==2.0.0 @@ -508,6 +518,12 @@ timm==1.0.11 # via -r requirements-test.in tokenizers==0.20.3 # via transformers +toml==0.10.2 + # via datamodel-code-generator +tomli==2.0.2 + # via + # black + # pytest torch==2.5.1 # via # -r requirements-test.in @@ -551,9 +567,12 @@ typepy[datetime]==1.3.2 # tabledata typing-extensions==4.12.2 # via + # anyio + # black # huggingface-hub # librosa # mistral-common + # multidict # pydantic # pydantic-core # torch @@ -571,6 +590,8 @@ xxhash==3.5.0 # evaluate yarl==1.17.1 # via aiohttp +zipp==3.20.2 + # via importlib-resources zstandard==0.23.0 # via lm-eval diff --git a/tests/basic_correctness/test_basic_correctness.py b/tests/basic_correctness/test_basic_correctness.py index 11d05cefb7313..fcba253d159f3 100644 --- a/tests/basic_correctness/test_basic_correctness.py +++ b/tests/basic_correctness/test_basic_correctness.py @@ -26,14 +26,6 @@ TARGET_TEST_SUITE = os.environ.get("TARGET_TEST_SUITE", "L4") -@pytest.fixture(autouse=True) -def v1(run_with_both_engines): - # Simple autouse wrapper to run both engines for each test - # This can be promoted up to conftest.py to run for every - # test in a package - pass - - def test_vllm_gc_ed(): """Verify vllm instance is GC'ed when it is deleted""" llm = LLM("facebook/opt-125m") @@ -44,7 +36,6 @@ def test_vllm_gc_ed(): assert weak_llm() is None -@pytest.mark.skip_v1 @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("backend", ["FLASH_ATTN", "XFORMERS", "FLASHINFER"]) @pytest.mark.parametrize("dtype", ["half"]) @@ -127,11 +118,6 @@ def test_models_distributed( if attention_backend: os.environ["VLLM_ATTENTION_BACKEND"] = attention_backend - # Import VLLM_USE_V1 dynamically to handle patching - from vllm.envs import VLLM_USE_V1 - if VLLM_USE_V1 and distributed_executor_backend != "mp": - pytest.skip(f"Skip {distributed_executor_backend} for V1") - dtype = "half" max_tokens = 5 @@ -157,7 +143,6 @@ def test_models_distributed( ) -@pytest.mark.skip_v1 def test_model_with_failure(vllm_runner) -> None: try: 
with patch("vllm.model_executor.models.opt.OPTForCausalLM.forward", @@ -184,7 +169,6 @@ def test_model_with_failure(vllm_runner) -> None: os.remove(filename) -@pytest.mark.skip_v1 def test_failure_with_async_out_proc(vllm_runner) -> None: filename = None diff --git a/tests/conftest.py b/tests/conftest.py index 9365b52dc74e1..070b05504ffd9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,6 +5,7 @@ from enum import Enum from typing import (Any, Callable, Dict, List, Optional, Tuple, Type, TypedDict, TypeVar, Union) +from unittest.mock import patch import numpy as np import pytest @@ -109,7 +110,7 @@ def prompts(self, prompts: _VideoAssetPrompts) -> List[str]: @pytest.fixture(params=[True, False]) -def run_with_both_engines(request, monkeypatch): +def run_with_both_engines(request): # Automatically runs tests twice, once with V1 and once without use_v1 = request.param # Tests decorated with `@skip_v1` are only run without v1 @@ -118,11 +119,11 @@ def run_with_both_engines(request, monkeypatch): if use_v1: if skip_v1: pytest.skip("Skipping test on vllm V1") - monkeypatch.setenv('VLLM_USE_V1', '1') + with patch('vllm.envs.VLLM_USE_V1', True): + yield else: - monkeypatch.setenv('VLLM_USE_V1', '0') - - yield + with patch('vllm.envs.VLLM_USE_V1', False): + yield @pytest.fixture(autouse=True) diff --git a/tests/distributed/test_pipeline_parallel.py b/tests/distributed/test_pipeline_parallel.py index b818ca921fcb0..386877e0e0a2c 100644 --- a/tests/distributed/test_pipeline_parallel.py +++ b/tests/distributed/test_pipeline_parallel.py @@ -247,19 +247,9 @@ def _compare_tp( *, method: Literal["generate", "encode"], ): - ( - tp_size, - pp_size, - eager_mode, - chunked_prefill, - ) = parallel_setup - ( - multi_node_only, - trust_remote_code, - tokenizer_mode, - load_format, - hf_overrides, - ) = test_options + tp_size, pp_size, eager_mode, chunked_prefill = parallel_setup + multi_node_only, trust_remote_code, tokenizer_mode, \ + load_format, hf_overrides = test_options if num_gpus_available < tp_size * pp_size: pytest.skip(f"Need at least {tp_size} x {pp_size} GPUs") diff --git a/tests/engine/test_arg_utils.py b/tests/engine/test_arg_utils.py index 4e269de9fc40b..de78d41ad12eb 100644 --- a/tests/engine/test_arg_utils.py +++ b/tests/engine/test_arg_utils.py @@ -50,12 +50,12 @@ def test_compilation_config(): args = parser.parse_args(["-O=3"]) assert args.compilation_config.level == 3 - # set to string form of a dict - args = parser.parse_args(["--compilation-config", "{'level': 3}"]) + # set to json + args = parser.parse_args(["--compilation-config", '{"level": 3}']) assert args.compilation_config.level == 3 - # set to string form of a dict - args = parser.parse_args(["--compilation-config={'level': 3}"]) + # set to json + args = parser.parse_args(['--compilation-config={"level": 3}']) assert args.compilation_config.level == 3 diff --git a/tests/entrypoints/openai/test_vision.py b/tests/entrypoints/openai/test_vision.py index a0b6edd566561..157d873a75b4d 100644 --- a/tests/entrypoints/openai/test_vision.py +++ b/tests/entrypoints/openai/test_vision.py @@ -89,7 +89,7 @@ async def test_single_chat_session_image(client: openai.AsyncOpenAI, choice = chat_completion.choices[0] assert choice.finish_reason == "length" assert chat_completion.usage == openai.types.CompletionUsage( - completion_tokens=10, prompt_tokens=775, total_tokens=785) + completion_tokens=10, prompt_tokens=772, total_tokens=782) message = choice.message message = chat_completion.choices[0].message @@ -181,7 +181,7 @@ async def 
test_single_chat_session_image_base64encoded( choice = chat_completion.choices[0] assert choice.finish_reason == "length" assert chat_completion.usage == openai.types.CompletionUsage( - completion_tokens=10, prompt_tokens=775, total_tokens=785) + completion_tokens=10, prompt_tokens=772, total_tokens=782) message = choice.message message = chat_completion.choices[0].message diff --git a/tests/entrypoints/openai/test_vision_embedding.py b/tests/entrypoints/openai/test_vision_embedding.py index 425f2a10ec855..d0c43b47bf0af 100644 --- a/tests/entrypoints/openai/test_vision_embedding.py +++ b/tests/entrypoints/openai/test_vision_embedding.py @@ -95,5 +95,5 @@ async def test_image_embedding(server: RemoteOpenAIServer, model_name: str, assert len(embeddings["data"]) == 1 assert len(embeddings["data"][0]["embedding"]) == 3072 assert embeddings["usage"]["completion_tokens"] == 0 - assert embeddings["usage"]["prompt_tokens"] == 765 - assert embeddings["usage"]["total_tokens"] == 765 + assert embeddings["usage"]["prompt_tokens"] == 762 + assert embeddings["usage"]["total_tokens"] == 762 diff --git a/tests/kernels/test_causal_conv1d.py b/tests/kernels/test_causal_conv1d.py index 51be2425d7dd7..f9b11018288be 100644 --- a/tests/kernels/test_causal_conv1d.py +++ b/tests/kernels/test_causal_conv1d.py @@ -149,14 +149,13 @@ def causal_conv1d_opcheck_fn(x: torch.Tensor, @pytest.mark.parametrize("itype", [torch.bfloat16, torch.float]) @pytest.mark.parametrize("silu_activation", [True]) @pytest.mark.parametrize("has_bias", [True]) -@pytest.mark.parametrize("has_initial_state", [True, False]) @pytest.mark.parametrize("width", [4]) @pytest.mark.parametrize( 'seqlen', [1, 8, 16, 32, 64, 128, 256, 512, 784, 1024, 1025, 2048, 4096]) @pytest.mark.parametrize('dim', [64]) @pytest.mark.parametrize('batch', [1]) def test_causal_conv1d(batch, dim, seqlen, width, has_bias, silu_activation, - has_initial_state, itype): + itype): device = "cuda" rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3) if itype == torch.bfloat16: @@ -168,18 +167,11 @@ def test_causal_conv1d(batch, dim, seqlen, width, has_bias, silu_activation, weight = torch.randn(dim, width, device=device, dtype=itype) bias = torch.randn(dim, device=device, dtype=itype) if has_bias else None - if has_initial_state: - initial_states = torch.randn(batch, - dim, - width - 1, - device=device, - dtype=itype) - has_initial_state_tensor = torch.ones(batch, - dtype=torch.bool, - device=x.device) - else: - initial_states = None - has_initial_state_tensor = None + initial_states = torch.randn(batch, + dim, + width - 1, + device=device, + dtype=itype) x_ref = x.clone() weight_ref = weight.clone() bias_ref = bias.clone() if bias is not None else None @@ -191,7 +183,9 @@ def test_causal_conv1d(batch, dim, seqlen, width, has_bias, silu_activation, bias, activation=activation, conv_states=initial_states, - has_initial_state=has_initial_state_tensor) + has_initial_state=torch.ones(batch, + dtype=torch.bool, + device=x.device)) out_ref, final_states_ref = causal_conv1d_ref( x_ref, weight_ref, @@ -199,12 +193,11 @@ def test_causal_conv1d(batch, dim, seqlen, width, has_bias, silu_activation, initial_states=initial_states_ref, return_final_states=True, activation=activation) - if has_initial_state: - assert initial_states is not None and final_states_ref is not None - assert torch.allclose(initial_states, - final_states_ref, - rtol=rtol, - atol=atol) + assert initial_states is not None and final_states_ref is not None + assert torch.allclose(initial_states, + 
final_states_ref, + rtol=rtol, + atol=atol) assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) causal_conv1d_opcheck_fn(x, @@ -212,7 +205,9 @@ def test_causal_conv1d(batch, dim, seqlen, width, has_bias, silu_activation, bias, activation=activation, conv_states=initial_states, - has_initial_state=has_initial_state_tensor) + has_initial_state=torch.ones(batch, + dtype=torch.bool, + device=x.device)) @pytest.mark.parametrize("itype", [torch.bfloat16]) diff --git a/tests/lora/test_layers.py b/tests/lora/test_layers.py index fb8c0b2a7ba26..a113e3f7abc1e 100644 --- a/tests/lora/test_layers.py +++ b/tests/lora/test_layers.py @@ -28,7 +28,7 @@ # yapf: enable from vllm.lora.models import (LongContextLoRAContext, LoRALayerWeights, PackedLoRALayerWeights) -from vllm.lora.punica_wrapper import get_punica_wrapper +from vllm.lora.punica import PunicaWrapper from vllm.model_executor.layers.linear import (ColumnParallelLinear, MergedColumnParallelLinear, QKVParallelLinear, @@ -48,12 +48,11 @@ torch.float32: (5e-3, 5e-3), torch.bfloat16: (3e-2, 2e-2), } -# TODO: Modify this based on platform -DEVICES = [ +CUDA_DEVICES = [ f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) ] -#For GPU, we will launch different triton kernels between the prefill and decode +# We will launch different triton kernels between the prefill and decode # stages, so we need to verify this. prefill stage(True) or decode stage(False) STAGES = [True, False] @@ -193,18 +192,9 @@ def create_random_inputs( return inputs, index_mapping, prompt_mapping -def check_punica_wrapper(punica_wrapper) -> bool: - if current_platform.is_cuda_alike(): - from vllm.lora.punica_wrapper.punica_gpu import PunicaWrapperGPU - - return type(punica_wrapper) is PunicaWrapperGPU - else: - return False - - @torch.inference_mode() @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) -@pytest.mark.parametrize("device", DEVICES) +@pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("vocab_size", [512, 32000, 64000, 128000]) @pytest.mark.parametrize("stage", STAGES) def test_embeddings(dist_init, num_loras, device, vocab_size, stage) -> None: @@ -215,8 +205,7 @@ def test_embeddings(dist_init, num_loras, device, vocab_size, stage) -> None: torch.set_default_device(device) max_loras = 8 - punica_wrapper = get_punica_wrapper(8192, 256, device) - assert check_punica_wrapper(punica_wrapper) + punica_wrapper = PunicaWrapper(8192, 256, device) lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, lora_dtype=torch.float16) @@ -307,7 +296,7 @@ def create_random_embedding_layer(): # @pytest.mark.skip( # reason="Fails when loras are in any slot other than the first.") @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) -@pytest.mark.parametrize("device", DEVICES) +@pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("vocab_size", [512, 32000, 64000, 128000]) @pytest.mark.parametrize("stage", STAGES) def test_embeddings_with_new_embeddings(dist_init, num_loras, device, @@ -316,8 +305,7 @@ def test_embeddings_with_new_embeddings(dist_init, num_loras, device, torch.cuda.set_device(device) torch.set_default_device(device) max_loras = 8 - punica_wrapper = get_punica_wrapper(8192, 256, device) - assert check_punica_wrapper(punica_wrapper) + punica_wrapper = PunicaWrapper(8192, 256, device) lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, lora_dtype=torch.float16) @@ -444,7 +432,7 @@ def create_random_embedding_layer(): @torch.inference_mode() @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) 
-@pytest.mark.parametrize("device", DEVICES) +@pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("vocab_size", [512, 32000, 64000, 256512]) @pytest.mark.parametrize("stage", STAGES) def test_lm_head_logits_processor(dist_init, num_loras, device, vocab_size, @@ -453,8 +441,7 @@ def test_lm_head_logits_processor(dist_init, num_loras, device, vocab_size, torch.cuda.set_device(device) torch.set_default_device(device) max_loras = 8 - punica_wrapper = get_punica_wrapper(8192, 256, device) - assert check_punica_wrapper(punica_wrapper) + punica_wrapper = PunicaWrapper(8192, 256, device) lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, lora_dtype=torch.float16) @@ -576,7 +563,7 @@ def _pretest(): @torch.inference_mode() @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) -@pytest.mark.parametrize("device", DEVICES) +@pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("stage", STAGES) @pytest.mark.parametrize("bias_enabled", [True, False]) def test_linear_replicated(dist_init, num_loras, device, stage, @@ -584,8 +571,7 @@ def test_linear_replicated(dist_init, num_loras, device, stage, torch.cuda.set_device(device) torch.set_default_device(device) - punica_wrapper = get_punica_wrapper(8192, 256, device) - assert check_punica_wrapper(punica_wrapper) + punica_wrapper = PunicaWrapper(8192, 256, device) max_loras = 8 lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, @@ -689,7 +675,7 @@ def create_random_linear_replicated_layer(): @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) @pytest.mark.parametrize("orientation", ["row", "column"]) @pytest.mark.parametrize("fully_shard", [True, False]) -@pytest.mark.parametrize("device", DEVICES) +@pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("stage", STAGES) @pytest.mark.parametrize("bias_enabled", [True, False]) def test_linear_parallel(dist_init, num_loras, orientation, fully_shard, @@ -697,8 +683,7 @@ def test_linear_parallel(dist_init, num_loras, orientation, fully_shard, torch.cuda.set_device(device) torch.set_default_device(device) - punica_wrapper = get_punica_wrapper(8192, 256, device) - assert check_punica_wrapper(punica_wrapper) + punica_wrapper = PunicaWrapper(8192, 256, device) max_loras = 8 lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, @@ -812,7 +797,7 @@ def create_random_linear_parallel_layer(): @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) @pytest.mark.parametrize("repeats", [1, 2, 3]) @pytest.mark.parametrize("fully_shard", [True, False]) -@pytest.mark.parametrize("device", DEVICES) +@pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("stage", STAGES) @pytest.mark.parametrize("bias_enabled", [True, False]) def test_column_parallel_packed(dist_init, num_loras, repeats, fully_shard, @@ -820,8 +805,7 @@ def test_column_parallel_packed(dist_init, num_loras, repeats, fully_shard, torch.cuda.set_device(device) torch.set_default_device(device) - punica_wrapper = get_punica_wrapper(8192, 256, device) - assert check_punica_wrapper(punica_wrapper) + punica_wrapper = PunicaWrapper(8192, 256, device) max_loras = 8 lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, @@ -979,8 +963,7 @@ def test_rotary_embedding_long_context(dist_init, num_loras, device, seed = 0 current_platform.seed_everything(seed) torch.set_default_device(device) - punica_wrapper = get_punica_wrapper(8192, 256, device) - assert check_punica_wrapper(punica_wrapper) + punica_wrapper = PunicaWrapper(8192, 256, device) max_loras = 8 
lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py index b3c7850556f90..4a824c7acef21 100644 --- a/tests/metrics/test_metrics.py +++ b/tests/metrics/test_metrics.py @@ -411,7 +411,7 @@ def log(self, *args, **kwargs): logger = _RayPrometheusStatLogger( local_interval=0.5, labels=dict(model_name=engine.model_config.served_model_name), - vllm_config=engine.vllm_config) + max_model_len=engine.model_config.max_model_len) engine.add_logger("ray", logger) for i, prompt in enumerate(example_prompts): engine.add_request( diff --git a/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_phi3v.py b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_phi3v.py index c16192a1e1438..60a8f63eb5faa 100644 --- a/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_phi3v.py +++ b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_phi3v.py @@ -2,10 +2,12 @@ from typing import Optional import pytest -from transformers import AutoTokenizer +import torch +from transformers import AutoImageProcessor, AutoTokenizer -from vllm.inputs import InputContext, InputProcessingContext +from vllm.inputs import InputContext, token_inputs from vllm.model_executor.models.phi3v import _IMAGE_TOKEN_ID +from vllm.multimodal import MultiModalRegistry from .....conftest import _ImageAssets from ....utils import build_model_context @@ -15,9 +17,15 @@ # Wrap lazy imports to avoid initializing CUDA during test collection @pytest.fixture() -def processor_for_phi3v(): - from vllm.model_executor.models.phi3v import Phi3VProcessor - return Phi3VProcessor +def input_processor_for_phi3v(): + from vllm.model_executor.models.phi3v import input_processor_for_phi3v + return input_processor_for_phi3v + + +@pytest.fixture() +def dummy_data_for_phi3v(): + from vllm.model_executor.models.phi3v import dummy_data_for_phi3v + return dummy_data_for_phi3v @pytest.fixture() @@ -26,6 +34,53 @@ def get_max_phi3v_image_tokens(): return get_max_phi3v_image_tokens +@pytest.mark.parametrize("model", models) +@pytest.mark.parametrize("num_crops", [4, 16, None]) +def test_input_mapper_override(model: str, image_assets: _ImageAssets, + num_crops: Optional[int]): + """Ensure that the [default] input mapper handles num_crops properly.""" + # We pass the processor kwargs here since for this model, we fall back to + # the default mapper; this will fall back to the HF mapper and forward + # mm_processor_kwargs to it. + mm_processor_kwargs = { + "num_crops": num_crops + } if num_crops is not None else {} + ctx = build_model_context( + model_name=model, + tokenizer_name=model, + trust_remote_code=True, + mm_processor_kwargs=mm_processor_kwargs, + ) + + hf_processor = AutoImageProcessor.from_pretrained(model, + trust_remote_code=True, + **mm_processor_kwargs) + + mm_registry = MultiModalRegistry() + mm_registry.init_mm_limits_per_prompt(ctx.model_config) + + image = image_assets[0].pil_image + hf_result = hf_processor.preprocess( + image, + return_tensors="pt", + ) + + vllm_result = mm_registry.map_input( + ctx.model_config, + {"image": image}, + ) + + assert torch.all(hf_result["image_sizes"] == vllm_result["image_sizes"]) + assert torch.all( + hf_result["num_img_tokens"] == vllm_result["num_img_tokens"]) + + # For pixel values, the second axis should be the num_crops + 1 + # for the rescaled original image. 
The default value in VLLM falls + # back to the HF config, which is why we compare to the processor num_crops + assert torch.all(hf_result["pixel_values"] == vllm_result["pixel_values"]) + assert vllm_result["pixel_values"].shape[1] == hf_processor.num_crops + 1 + + @pytest.mark.parametrize("model", models) @pytest.mark.parametrize("num_crops,expected_max_tokens", [ (4, 781), @@ -57,20 +112,48 @@ def test_max_tokens_override(get_max_phi3v_image_tokens, model: str, @pytest.mark.parametrize("model", models) -@pytest.mark.parametrize( - "num_crops,expected_toks_per_img,num_imgs", - [ - (4, 757, 1), - (4, 757, 2), - (16, 1921, 1), - (16, 1921, 2), - # the default num_crops of phi-3.5-vision is 4 - (None, 757, 2), - (None, 757, 2), - ]) -def test_processor_override(processor_for_phi3v, image_assets: _ImageAssets, - model: str, num_crops: Optional[int], - expected_toks_per_img: int, num_imgs: int): +@pytest.mark.parametrize("num_crops,toks_per_img,num_imgs", [ + (4, 781, 1), + (4, 781, 2), + (16, 2653, 1), + (16, 2653, 2), +]) +def test_dummy_data_override(dummy_data_for_phi3v, model: str, num_crops: int, + toks_per_img: int, num_imgs: int): + """Ensure dummy_data_for_phi3v handles num_crops properly.""" + # Same as the previous test - don't initialize mm_processor_kwargs + # in this test and assume that the kwargs will be correctly expanded by + # the partial when calling the dummy data func. + ctx = build_model_context( + model_name=model, + tokenizer_name=model, + trust_remote_code=True, + mm_processor_kwargs=None, + ) + + dummy_data = dummy_data_for_phi3v( + ctx=ctx, + seq_len=8192, # Should be bigger than num_imgs * toks_per_img + mm_counts={"image": num_imgs}, + num_crops=num_crops, + ) + sequence_data = dummy_data.seq_data + # Ensure we have the right number of placeholders per num_crops size + img_tok_count = sequence_data.get_token_ids().count(_IMAGE_TOKEN_ID) + assert img_tok_count == toks_per_img * num_imgs + + +@pytest.mark.parametrize("model", models) +@pytest.mark.parametrize("num_crops,expected_toks_per_img,num_imgs", [ + (4, 757, 1), + (4, 757, 2), + (16, 1921, 1), + (16, 1921, 2), +]) +def test_input_processor_override(input_processor_for_phi3v, + image_assets: _ImageAssets, model: str, + num_crops: int, expected_toks_per_img: int, + num_imgs: int): """Ensure input_processor_for_phi3v handles num_crops properly.""" # Same as the previous test - don't initialize mm_processor_kwargs # in this test and assume that the kwargs will be correctly expanded by @@ -80,20 +163,19 @@ def test_processor_override(processor_for_phi3v, image_assets: _ImageAssets, tokenizer_name=model, trust_remote_code=True, ) - tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True) - ctx = InputProcessingContext(ctx.model_config, tokenizer) + tokenizer = AutoTokenizer.from_pretrained(model) # Build the image str / prompt based on the number of images we pass img_str = "".join([f"<|image_{idx}|>\n" for idx in range(1, num_imgs + 1)]) prompt = f"<|user|>\n{img_str}<|end|>\n<|assistant|>\n" images = [image_assets[0].pil_image] * num_imgs - mm_data = {"image": images} - mm_processor_kwargs = {} - if num_crops is not None: - mm_processor_kwargs = {"num_crops": num_crops} + inputs = token_inputs(prompt_token_ids=tokenizer.encode(prompt), + prompt=prompt, + multi_modal_data={"image": images}) - processor = processor_for_phi3v(ctx) - processed_inputs = processor.apply(prompt, mm_data, mm_processor_kwargs) + processed_inputs = input_processor_for_phi3v(ctx, + inputs, + num_crops=num_crops) # Ensure we 
have the right number of placeholders per num_crops size img_tok_count = processed_inputs["prompt_token_ids"].count(_IMAGE_TOKEN_ID) diff --git a/tests/models/decoder_only/vision_language/test_models.py b/tests/models/decoder_only/vision_language/test_models.py index ed8f34a677f84..924f19c4448b8 100644 --- a/tests/models/decoder_only/vision_language/test_models.py +++ b/tests/models/decoder_only/vision_language/test_models.py @@ -34,7 +34,7 @@ "dtype": "half", "max_tokens": 5, "tensor_parallel_size": 2, - "hf_model_kwargs": {"device_map": "auto"}, + "model_kwargs": {"device_map": "auto"}, "image_size_factors": [(.25, 0.5, 1.0)], "distributed_executor_backend": ( "ray", @@ -108,7 +108,7 @@ "cherry_blossom": "What is in the picture?", }), auto_cls=AutoModelForVision2Seq, - postprocess_inputs=model_utils.cast_dtype_post_processor( + postprocess_inputs=model_utils.get_key_type_post_processor( "pixel_values" ), vllm_output_post_proc=model_utils.paligemma_vllm_to_hf_output, @@ -151,7 +151,7 @@ "cherry_blossom": "Please infer the season with reason.", }), multi_image_prompt="Describe the two images shortly.", # noqa: E501 - postprocess_inputs=model_utils.cast_dtype_post_processor("pixel_values"), + postprocess_inputs=model_utils.get_key_type_post_processor("pixel_values"), stop_str=["<|im_end|>"], image_size_factors=[(0.10, 0.15)], max_tokens=64, @@ -177,7 +177,7 @@ prompt_formatter=lambda img_prompt: f"USER: {img_prompt}\nASSISTANT:", max_model_len=4096, auto_cls=AutoModelForVision2Seq, - postprocess_inputs=model_utils.cast_dtype_post_processor( + postprocess_inputs=model_utils.get_key_type_post_processor( "pixel_values" ), # For chameleon, we only compare the sequences @@ -281,7 +281,7 @@ prompt_formatter=lambda vid_prompt: f"<|im_start|>user\n{vid_prompt}<|im_end|>\n<|im_start|>assistant\n", # noqa: E501 num_video_frames=16, max_model_len=16384, - postprocess_inputs=model_utils.cast_dtype_post_processor( + postprocess_inputs=model_utils.get_key_type_post_processor( "pixel_values_videos" ), auto_cls=AutoModelForVision2Seq, @@ -306,20 +306,6 @@ vllm_output_post_proc=model_utils.llava_video_vllm_to_hf_output, image_sizes=[((1669, 2560), (2560, 1669), (183, 488), (488, 183))], ), - "mantis": VLMTestInfo( - models=["TIGER-Lab/Mantis-8B-siglip-llama3"], - test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE), - prompt_formatter=lambda img_prompt: f"<|start_header_id|>user<|end_header_id|>\n\n{img_prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", # noqa: E501 - max_model_len=4096, - postprocess_inputs=model_utils.cast_dtype_post_processor( - "pixel_values" - ), - vllm_runner_kwargs={"hf_overrides": {"architectures": ["MantisForConditionalGeneration"]}}, # noqa: E501 - get_stop_token_ids=lambda tok: [128009], - auto_cls=AutoModelForVision2Seq, - vllm_output_post_proc=model_utils.mantis_vllm_to_hf_output, - patch_hf_runner=model_utils.mantis_patch_hf_runner, - ), "minicpmv_25": VLMTestInfo( models=["openbmb/MiniCPM-Llama3-V-2_5"], test_type=VLMTestType.IMAGE, @@ -356,7 +342,7 @@ # max_num_seqs=2, # task="generate", # # use eager mode for hf runner since phi3v didn't work with flash_attn - # hf_model_kwargs={"_attn_implementation": "eager"}, + # model_kwargs={"_attn_implementation": "eager"}, # use_tokenizer_eos=True, # vllm_output_post_proc=model_utils.phi3v_vllm_to_hf_output, # num_logprobs=10, @@ -387,7 +373,7 @@ prompt_formatter=lambda img_prompt: f"USER: {img_prompt}\nASSISTANT:", max_model_len=4096, auto_cls=AutoModelForVision2Seq, - 
postprocess_inputs=model_utils.cast_dtype_post_processor( + postprocess_inputs=model_utils.get_key_type_post_processor( "pixel_values" ), vllm_output_post_proc = lambda vllm_output, model: vllm_output[:2], @@ -452,7 +438,7 @@ test_type=VLMTestType.CUSTOM_INPUTS, max_model_len=16384, max_num_seqs=2, - postprocess_inputs=model_utils.cast_dtype_post_processor( + postprocess_inputs=model_utils.get_key_type_post_processor( "pixel_values" ), auto_cls=AutoModelForVision2Seq, diff --git a/tests/models/decoder_only/vision_language/vlm_utils/core.py b/tests/models/decoder_only/vision_language/vlm_utils/core.py index 54b7b0733210f..88349ef9a3a69 100644 --- a/tests/models/decoder_only/vision_language/vlm_utils/core.py +++ b/tests/models/decoder_only/vision_language/vlm_utils/core.py @@ -3,11 +3,9 @@ import torch from PIL.Image import Image -from transformers import AutoTokenizer, BatchEncoding, PreTrainedTokenizerBase +from transformers import AutoTokenizer, BatchEncoding from transformers.models.auto.auto_factory import _BaseAutoModelClass -from vllm.config import TaskOption - from .....conftest import HfRunner, VllmRunner from .types import RunnerOutput @@ -30,15 +28,13 @@ def run_test( use_tokenizer_eos: bool, postprocess_inputs: Callable[[BatchEncoding], BatchEncoding], comparator: Callable[..., None], - get_stop_token_ids: Optional[Callable[[PreTrainedTokenizerBase], - List[int]]], + get_stop_token_ids: Optional[Callable[[AutoTokenizer], List[int]]], stop_str: Optional[List[str]], tokenizer_mode: str, limit_mm_per_prompt: Dict[str, int], - vllm_runner_kwargs: Optional[Dict[str, Any]], - hf_model_kwargs: Optional[Dict[str, Any]], + model_kwargs: Optional[Dict[str, Any]], patch_hf_runner: Optional[Callable[[HfRunner], HfRunner]], - task: TaskOption = "auto", + task: str = "auto", runner_mm_key: str = "images", distributed_executor_backend: Optional[str] = None, tensor_parallel_size: int = 1, @@ -62,9 +58,6 @@ def run_test( if stop_str: vllm_kwargs["stop"] = stop_str - if vllm_runner_kwargs is None: - vllm_runner_kwargs = {} - with vllm_runner(model, tokenizer_mode=tokenizer_mode, max_model_len=max_model_len, @@ -74,8 +67,7 @@ def run_test( tensor_parallel_size=tensor_parallel_size, distributed_executor_backend=distributed_executor_backend, enforce_eager=enforce_eager, - task=task, - **vllm_runner_kwargs) as vllm_model: + task=task) as vllm_model: for prompts, media in vllm_inputs: vllm_kwargs[runner_mm_key] = media vllm_output = vllm_model.generate_greedy_logprobs( @@ -86,7 +78,7 @@ def run_test( dtype=dtype, auto_cls=auto_cls, postprocess_inputs=postprocess_inputs, - model_kwargs=hf_model_kwargs) + model_kwargs=model_kwargs) # Some models need to patch things like the model processor, e.g., internvl if patch_hf_runner is not None: diff --git a/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py b/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py index 3eca8fb9dcb1a..15f15dd7d8030 100644 --- a/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py +++ b/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py @@ -126,16 +126,6 @@ def llava_onevision_vllm_to_hf_output(vllm_output: RunnerOutput, return hf_output_ids, hf_output_str, out_logprobs -def mantis_vllm_to_hf_output(vllm_output: RunnerOutput, - model: str) -> RunnerOutput: - """Sanitize vllm output [mantis] to compare with hf output.""" - output_ids, output_str, out_logprobs = vllm_output - - hf_output_str = output_str + "<|eot_id|>" - - return output_ids, hf_output_str, out_logprobs - - 
def phi3v_vllm_to_hf_output(vllm_output: RunnerOutput, model: str) -> RunnerOutput: """Sanitize vllm output [phi3v] to be comparable with hf output.""" @@ -194,7 +184,7 @@ def get_llava_embeddings(image_assets: _ImageAssets): ####### postprocessors to run on HF BatchEncoding -def cast_dtype_post_processor( +def get_key_type_post_processor( hf_inp_key: str) -> Callable[[BatchEncoding, str], BatchEncoding]: """Gets a handle to a post processor which converts a given key into a target data type.""" @@ -428,26 +418,3 @@ def _internvl_generate( ) return outputs - - -def mantis_patch_hf_runner(hf_model: HfRunner) -> HfRunner: - from mantis.models.mllava import MLlavaProcessor - - hf_model.processor = MLlavaProcessor.from_pretrained(hf_model.model_name) - - orig_generate = hf_model.model.generate - tokenizer = hf_model.processor.tokenizer - - def _generate(self, *args, **kwargs): - return orig_generate( - *args, - **kwargs, - eos_token_id=[ - tokenizer.eos_token_id, - tokenizer.convert_tokens_to_ids("<|eot_id|>"), - ], - ) - - hf_model.model.generate = types.MethodType(_generate, hf_model.model) - - return hf_model diff --git a/tests/models/decoder_only/vision_language/vlm_utils/types.py b/tests/models/decoder_only/vision_language/vlm_utils/types.py index e2e0c6390fcb9..d410fa8c653ce 100644 --- a/tests/models/decoder_only/vision_language/vlm_utils/types.py +++ b/tests/models/decoder_only/vision_language/vlm_utils/types.py @@ -7,11 +7,9 @@ import torch from PIL.Image import Image from pytest import MarkDecorator -from transformers import (AutoModelForCausalLM, BatchEncoding, - PreTrainedTokenizerBase) +from transformers import AutoModelForCausalLM, AutoTokenizer, BatchEncoding from transformers.models.auto.auto_factory import _BaseAutoModelClass -from vllm.config import TaskOption from vllm.sequence import SampleLogprobs from vllm.utils import identity @@ -68,7 +66,7 @@ class ImageSizeWrapper(NamedTuple): class VLMTestInfo(NamedTuple): """Holds the configuration for 1+ tests for one model architecture.""" - models: List[str] + models: Union[List[str]] test_type: Union[VLMTestType, Iterable[VLMTestType]] # Should be None only if this is a CUSTOM_INPUTS test @@ -94,20 +92,18 @@ class VLMTestInfo(NamedTuple): enforce_eager: bool = True max_model_len: int = 1024 max_num_seqs: int = 256 - task: TaskOption = "auto" + task: str = "auto" tensor_parallel_size: int = 1 - vllm_runner_kwargs: Optional[Dict[str, Any]] = None # Optional callable which gets a list of token IDs from the model tokenizer - get_stop_token_ids: Optional[Callable[[PreTrainedTokenizerBase], - List[int]]] = None + get_stop_token_ids: Optional[Callable[[AutoTokenizer], List[int]]] = None # Optional list of strings to stop generation, useful when stop tokens are # not special tokens in the tokenizer stop_str: Optional[List[str]] = None # Exposed options for HF runner - hf_model_kwargs: Optional[Dict[str, Any]] = None - # Indicates we should explicitly pass the EOS from the tokenizer + model_kwargs: Optional[Dict[str, Any]] = None + # Indicates we should explicitly pass the EOS from the tokeniezr use_tokenizer_eos: bool = False auto_cls: Type[_BaseAutoModelClass] = AutoModelForCausalLM # Callable to pass to the HF runner to run on inputs; for now, we also pass @@ -168,7 +164,6 @@ def get_non_parametrized_runner_kwargs(self): "max_num_seqs": self.max_num_seqs, "task": self.task, "tensor_parallel_size": self.tensor_parallel_size, - "vllm_runner_kwargs": self.vllm_runner_kwargs, "hf_output_post_proc": self.hf_output_post_proc, 
"vllm_output_post_proc": self.vllm_output_post_proc, "auto_cls": self.auto_cls, @@ -176,8 +171,8 @@ def get_non_parametrized_runner_kwargs(self): "postprocess_inputs": self.postprocess_inputs, "comparator": self.comparator, "get_stop_token_ids": self.get_stop_token_ids, - "hf_model_kwargs": self.hf_model_kwargs, "stop_str": self.stop_str, + "model_kwargs": self.model_kwargs, "patch_hf_runner": self.patch_hf_runner, "tokenizer_mode": self.tokenizer_mode } diff --git a/tests/models/registry.py b/tests/models/registry.py index a89518820045f..461f453d8b1c3 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -176,7 +176,6 @@ class _HfExamplesInfo: "LlavaNextForConditionalGeneration": _HfExamplesInfo("llava-hf/llava-v1.6-mistral-7b-hf"), # noqa: E501 "LlavaNextVideoForConditionalGeneration": _HfExamplesInfo("llava-hf/LLaVA-NeXT-Video-7B-hf"), # noqa: E501 "LlavaOnevisionForConditionalGeneration": _HfExamplesInfo("llava-hf/llava-onevision-qwen2-0.5b-ov-hf"), # noqa: E501 - "MantisForConditionalGeneration": _HfExamplesInfo("TIGER-Lab/Mantis-8B-siglip-llama3"), # noqa: E501 "MiniCPMV": _HfExamplesInfo("openbmb/MiniCPM-Llama3-V-2_5", trust_remote_code=True), "MolmoForCausalLM": _HfExamplesInfo("allenai/Molmo-7B-D-0924", diff --git a/tests/multimodal/test_mapper.py b/tests/multimodal/test_mapper.py index 71832acbd17b8..13ad4a7966b9d 100644 --- a/tests/multimodal/test_mapper.py +++ b/tests/multimodal/test_mapper.py @@ -2,7 +2,7 @@ import numpy as np import pytest -from transformers import LlavaNextImageProcessor +from transformers import CLIPImageProcessor, LlavaNextImageProcessor from vllm.config import ModelConfig from vllm.multimodal import MultiModalRegistry @@ -14,6 +14,49 @@ def mm_registry(): return MultiModalRegistry() +@pytest.mark.parametrize("dtype", ["half", "float"]) +@pytest.mark.parametrize("size_factor", [0.25, 0.5, 1.0]) +def test_clip_image_processor(image_assets, mm_registry, dtype, size_factor): + MODEL_NAME = "llava-hf/llava-1.5-7b-hf" + + hf_processor = CLIPImageProcessor.from_pretrained(MODEL_NAME) + assert isinstance(hf_processor, CLIPImageProcessor) + + model_config = ModelConfig( + model=MODEL_NAME, + task="auto", + tokenizer=MODEL_NAME, + tokenizer_mode="auto", + trust_remote_code=False, + seed=0, + dtype=dtype, + revision=None, + limit_mm_per_prompt={"image": 1}, + ) + + mm_registry.init_mm_limits_per_prompt(model_config) + + for asset in image_assets: + image = rescale_image_size(asset.pil_image, size_factor) + + hf_result = hf_processor.preprocess( + image, + return_tensors="pt", + ) + vllm_result = mm_registry.map_input( + model_config, + {"image": image}, + ) + + assert hf_result.keys() == vllm_result.keys() + for key, hf_tensor in hf_result.items(): + hf_arr: np.ndarray = hf_tensor.numpy() + vllm_arr: np.ndarray = vllm_result[key].numpy() + + assert hf_arr.shape == vllm_arr.shape, f"Failed for key={key}" + assert np.allclose(hf_arr, vllm_arr), f"Failed for key={key}" + + @pytest.mark.parametrize("dtype", ["half", "float"]) @pytest.mark.parametrize("size_factor", [0.25, 0.5, 1.0]) def test_llava_next_image_processor(image_assets, mm_registry, dtype, @@ -64,7 +107,7 @@ def test_llava_next_image_processor(image_assets, mm_registry, dtype, (2, 1, False), (2, 2, True)], ) def test_mm_limits(image_assets, mm_registry, num_images, limit, is_valid): - MODEL_NAME = "llava-hf/llava-v1.6-mistral-7b-hf" + MODEL_NAME = "llava-hf/llava-1.5-7b-hf" model_config = ModelConfig( model=MODEL_NAME, @@ -95,7 +138,7 @@ def test_mm_limits(image_assets, mm_registry, 
num_images, limit, is_valid): # NOTE: We don't test zero images since the HF processor doesn't support it @pytest.mark.parametrize("num_images", [1, 2]) def test_image_mapper_multi(image_assets, mm_registry, num_images): - MODEL_NAME = "llava-hf/llava-v1.6-mistral-7b-hf" + MODEL_NAME = "llava-hf/llava-1.5-7b-hf" model_config = ModelConfig( model=MODEL_NAME, diff --git a/tests/multimodal/test_processing.py b/tests/multimodal/test_processing.py index ae668d1dd56c8..b2367060c6c1b 100644 --- a/tests/multimodal/test_processing.py +++ b/tests/multimodal/test_processing.py @@ -3,15 +3,50 @@ import pytest from transformers import BatchFeature -from vllm.multimodal.processing import (PromptReplacement, _PlaceholderInfo, - find_text_matches, find_token_matches, - iter_placeholders, iter_token_matches, - replace_text_matches, - replace_token_matches) +from vllm.multimodal.processing import (PromptReplacement, find_text_matches, + find_token_matches, iter_token_matches, + iter_token_runs, replace_text_matches) from vllm.transformers_utils.tokenizer import AnyTokenizer from vllm.utils import full_groupby +# yapf: disable +@pytest.mark.parametrize( + ("token_ids", "expected"), + [ + ([], []), + ( + [32000, 32000, 32000], + [{ "token_id": 32000, "start_idx": 0, "length": 3 }], + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + [ + { "token_id": 9833, "start_idx": 0, "length": 1 }, + { "token_id": 28747, "start_idx": 1, "length": 1 }, + { "token_id": 32000, "start_idx": 2, "length": 3 }, + { "token_id": 9833, "start_idx": 5, "length": 1 }, + { "token_id": 28747, "start_idx": 6, "length": 1 }, + { "token_id": 32000, "start_idx": 7, "length": 2 }, + { "token_id": 918, "start_idx": 9, "length": 1 }, + ], + ), + ], +) +# yapf: enable +def test_iter_token_runs(token_ids, expected): + result = list(iter_token_runs(token_ids)) + + # Only displayed on error + print("result:", result) + + # Manually constructed results + assert [item._asdict() for item in result] == expected + + # Invariants + assert sum(run_info.length for run_info in result) == len(token_ids) + + # yapf: disable @pytest.mark.parametrize( ("token_ids", "match_ids", "expected"), @@ -135,11 +170,13 @@ def test_find_token_matches(prompt, target_by_key, expected_by_key): # Should not be used since there is nothing to convert to token IDs mock_tokenizer = cast(AnyTokenizer, object()) - prompt_repls = [ - PromptReplacement(target, [], 0).bind(key, mock_tokenizer) - for key, target in target_by_key.items() - ] - result = find_token_matches(prompt, prompt_repls) + result = find_token_matches( + prompt, + [ + PromptReplacement(target, [], 0).bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ], + ) # Only displayed on error print("result:", result) @@ -242,11 +279,13 @@ def test_find_text_matches(prompt, target_by_key, expected_by_key): # Should not be used since there is nothing to convert to text mock_tokenizer = cast(AnyTokenizer, object()) - prompt_repls = [ - PromptReplacement(target, [], 0).bind(key, mock_tokenizer) - for key, target in target_by_key.items() - ] - result = find_text_matches(prompt, prompt_repls) + result = find_text_matches( + prompt, + [ + PromptReplacement(target, [], 0).bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ], + ) # Only displayed on error print("result:", result) @@ -264,7 +303,7 @@ def test_find_text_matches(prompt, target_by_key, expected_by_key): # yapf: disable @pytest.mark.parametrize( - ("prompt", "target_by_key", "repl_by_key"), + 
("prompt", "target_by_key", "repl_by_key", "expected_by_mm_count"), [ ( "Image:Image:!", @@ -283,201 +322,49 @@ def test_find_text_matches(prompt, target_by_key, expected_by_key): # Test multiple repl_count "pattern_3": ("?", 2), }, - ), - ] -) -@pytest.mark.parametrize( - ("mm_count", "expected"), - [ - (0, "Image:Image:!"), - (1, "Image:??"), - (2, "??"), - ] -) -# yapf: enable -def test_find_replace_text( - prompt, - target_by_key, - repl_by_key, - mm_count, - expected, -): - # Should not be used since there is nothing to convert to text - mock_tokenizer = cast(AnyTokenizer, object()) - - prompt_repls = [ - PromptReplacement(target, *repl_by_key[key]).bind(key, mock_tokenizer) - for key, target in target_by_key.items() - ] - matches = find_text_matches(prompt, prompt_repls) - - result = replace_text_matches( - prompt, - matches, - {key: list(range(mm_count)) - for key in repl_by_key}, - BatchFeature(), - ) - - # Only displayed on error - print("matches:", matches) - print("result:", result) - - # Manually constructed results - assert result == expected - - -# yapf: disable -@pytest.mark.parametrize( - ("prompt", "target_by_key", "repl_by_key"), - [ - # Tokenized test cases of `test_find_replace_text` - # using the vocab of llava-hf/llava-v1.6-mistral-7b-hf - ( - [1, 9833, 28747, 32000, 9833, 28747, 32000, 32000, 918], { - # We use `` before `Image:` to test matches that - # occur out of order - "pattern_1": [32000], - "pattern_2": [9833, 28747], - "pattern_3": [918], - }, - { - # Test whether target is confused with repl_unit - "pattern_1": ([32000, 32000], 1), - # Test empty repl_unit - "pattern_2": ([], 1), - # Test multiple repl_count - "pattern_3": ([1550], 2), + # Test no replacement + 0: "Image:Image:!", + # Test single replacement + 1: "Image:??", + # Test repeated replacement + 2: "??", }, ), ] ) -@pytest.mark.parametrize( - ("mm_count", "expected"), - [ - (0, [1, 9833, 28747, 32000, 9833, 28747, 32000, 32000, 918]), - (1, [1, 32000, 32000, 9833, 28747, 32000, 32000, 1550, 1550]), - (2, [1, 32000, 32000, 32000, 32000, 32000, 1550, 1550]), - ] -) # yapf: enable -def test_find_replace_tokens( +def test_find_replace_text( prompt, target_by_key, repl_by_key, - mm_count, - expected, + expected_by_mm_count, ): - # Should not be used since there is nothing to convert to tokens + # Should not be used since there is nothing to convert to text mock_tokenizer = cast(AnyTokenizer, object()) - prompt_repls = [ - PromptReplacement(target, *repl_by_key[key]).bind(key, mock_tokenizer) - for key, target in target_by_key.items() - ] - matches = find_token_matches(prompt, prompt_repls) - - result = replace_token_matches( + matches = find_text_matches( prompt, - matches, - {key: list(range(mm_count)) - for key in repl_by_key}, - BatchFeature(), + [ + PromptReplacement(target, *repl_by_key[key]) \ + .bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ], ) + result_by_mm_count = { + mm_count: replace_text_matches( + prompt, + matches, + {key: list(range(mm_count)) + for key in repl_by_key}, + BatchFeature(), + ) + for mm_count in expected_by_mm_count + } # Only displayed on error print("matches:", matches) - print("result:", result) - - # Manually constructed results - assert result == expected - - -# yapf: disable -@pytest.mark.parametrize( - "repl_by_key", - [ - { - "pattern_1": ([32000, 32000], 1), - "pattern_2": ([], 1), - "pattern_3": ([1550], 2), - }, - ], -) -@pytest.mark.parametrize( - ("prompt", "expected"), - [ - ( - [1, 9833, 28747, 32000, 9833, 28747, 32000, 32000, 
918], - [ - _PlaceholderInfo( - modality="pattern_1", - start_idx=6, - unit=[32000, 32000], - unit_count=1, - ), - ], - ), - ( - [1, 32000, 32000, 9833, 28747, 32000, 32000, 1550, 1550], - [ - _PlaceholderInfo( - modality="pattern_1", - start_idx=1, - unit=[32000, 32000], - unit_count=1, - ), - _PlaceholderInfo( - modality="pattern_1", - start_idx=5, - unit=[32000, 32000], - unit_count=1, - ), - _PlaceholderInfo( - modality="pattern_3", - start_idx=7, - unit=[1550], - unit_count=2, - ), - ], - ), - ( - [1, 32000, 32000, 32000, 32000, 32000, 1550, 1550], - [ - _PlaceholderInfo( - modality="pattern_1", - start_idx=1, - unit=[32000, 32000], - unit_count=2, - ), - _PlaceholderInfo( - modality="pattern_3", - start_idx=6, - unit=[1550], - unit_count=2, - ), - ], - ), - ] -) -def test_iter_placeholders( - repl_by_key, - prompt, - expected, -): - # Should not be used since there is nothing to convert to tokens - mock_tokenizer = cast(AnyTokenizer, object()) - - prompt_repls = [ - PromptReplacement([], *repl).bind(key, mock_tokenizer) - for key, repl in repl_by_key.items() - ] - - result = list(iter_placeholders(prompt_repls, prompt)) - - # Only displayed on error - print("result:", result) + print("result_by_mm_count:", result_by_mm_count) # Manually constructed results - assert result == expected + assert result_by_mm_count == expected_by_mm_count diff --git a/tests/multimodal/test_processor_kwargs.py b/tests/multimodal/test_processor_kwargs.py index d141cdf1f083b..e6c8793989e13 100644 --- a/tests/multimodal/test_processor_kwargs.py +++ b/tests/multimodal/test_processor_kwargs.py @@ -15,13 +15,13 @@ # Used for fast tests where the model doesn't matter DUMMY_MODEL_ID = "facebook/opt-125m" # Used for tests that need a multimodal model -MULTIMODAL_MODEL_ID = "OpenGVLab/InternVL2-2B" +MULTIMODAL_MODEL_ID = "microsoft/Phi-3.5-vision-instruct" # For mm_processor_kwargs - we test overrides by defining mocks for each place # it is used, and ensuring that we can pass processor kwargs an override value # to receive the intended result for things like sequence length etc. 
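Outside of these mocked unit tests, the same kind of override is what a user supplies through the engine's mm_processor_kwargs argument. A minimal usage sketch, assuming the Phi-3.5-vision model referenced in this file accepts a num_crops processor kwarg; it is illustrative only and not part of this patch:

from vllm import LLM

# Init-time override: requests served by this engine use 16 crops unless
# the input processor is later given a per-call override (the tests below
# check that such per-call kwargs, when given, take precedence).
llm = LLM(
    model="microsoft/Phi-3.5-vision-instruct",
    trust_remote_code=True,
    mm_processor_kwargs={"num_crops": 16},
    limit_mm_per_prompt={"image": 1},
)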
-DEFAULT_MAX_DYNAMIC_PATCH = 6 -MAX_DYNAMIC_PATCH_OVERRIDE = 4 +DEFAULT_NUM_CROPS = 4 +NUM_CROPS_OVERRIDE = 16 # Mocks for all of the places that we use the mm_processor_kwargs @@ -33,11 +33,10 @@ def use_processor_mock(): def custom_processor(ctx: InputContext, inputs: DecoderOnlyInputs, *, - max_dynamic_patch=DEFAULT_MAX_DYNAMIC_PATCH): + num_crops=DEFAULT_NUM_CROPS): # For testing purposes, we don't worry about the prompt - return token_inputs( - prompt_token_ids=[], - mm_processor_kwargs={"max_dynamic_patch": max_dynamic_patch}) + return token_inputs(prompt_token_ids=[], + mm_processor_kwargs={"num_crops": num_crops}) with patch("vllm.inputs.registry.InputRegistry._get_model_input_processor", return_value=custom_processor): @@ -53,9 +52,9 @@ def custom_dummy_data_factory(self, seq_len: int, mm_counts: Mapping[str, int], *, - max_dynamic_patch=DEFAULT_MAX_DYNAMIC_PATCH): + num_crops=DEFAULT_NUM_CROPS): seq_data = SequenceData( - array(VLLM_TOKEN_ID_ARRAY_TYPE, [0] * max_dynamic_patch)) + array(VLLM_TOKEN_ID_ARRAY_TYPE, [0] * num_crops)) return DummyData(seq_data, None) with patch( @@ -66,15 +65,15 @@ def custom_dummy_data_factory(self, # Lazy import to avoid CUDA reinitialization error def mm_model_cls(): - from vllm.model_executor.models.internvl import InternVLChatModel + from vllm.model_executor.models.phi3v import Phi3VForCausalLM - return InternVLChatModel + return Phi3VForCausalLM # lambda whose signature matches max token calcs extra & mapper + extra kwargs -get_max_dynamic_patch = lambda ctx, *, max_dynamic_patch=DEFAULT_MAX_DYNAMIC_PATCH: max_dynamic_patch # noqa: E501 -custom_mapper = lambda ctx, data, *, max_dynamic_patch=DEFAULT_MAX_DYNAMIC_PATCH: { # noqa: E501 - "pixel_values": torch.zeros(size=(1, max_dynamic_patch + 1, 3, 448, 448)) +get_num_crops = lambda ctx, *, num_crops=DEFAULT_NUM_CROPS: num_crops +custom_mapper = lambda ctx, data, *, num_crops=DEFAULT_NUM_CROPS: { + "pixel_values": torch.zeros(size=(1, num_crops + 1, 3, 336, 336)) } @@ -89,28 +88,27 @@ def test_default_processor_is_a_noop(): assert proc_inputs is proc_outputs -def _get_max_dynamic_patch_info(init_max_dynamic_patch: int, - inference_max_dynamic_patch: int): - """Get the init / inference kwargs and expected max_dynamic_patch.""" - # If we have a value for max_dynamic_patch, pass the override value and make +def _get_num_crops_info(init_num_crops: int, inference_num_crops: int): + """Get the init / inference kwargs and expected num_crops for this test.""" + # If we have a value for num_crops, pass the override value and make # sure we get that value as a return-value from out mock processor, # otherwise fall back to the default value - init_kwargs = None if init_max_dynamic_patch is None else { - "max_dynamic_patch": init_max_dynamic_patch + init_kwargs = None if init_num_crops is None else { + "num_crops": init_num_crops } - inference_kwargs = None if inference_max_dynamic_patch is None else { - "max_dynamic_patch": inference_max_dynamic_patch + inference_kwargs = None if inference_num_crops is None else { + "num_crops": inference_num_crops } - if inference_max_dynamic_patch is not None: - expected_seq_count = inference_max_dynamic_patch - elif init_max_dynamic_patch is not None: - expected_seq_count = init_max_dynamic_patch + if inference_num_crops is not None: + expected_seq_count = inference_num_crops + elif init_num_crops is not None: + expected_seq_count = init_num_crops else: - expected_seq_count = DEFAULT_MAX_DYNAMIC_PATCH + expected_seq_count = DEFAULT_NUM_CROPS return init_kwargs, 
inference_kwargs, expected_seq_count -def _get_processed_max_dynamic_patch( +def _get_processed_num_crops( processor: Callable[[ProcessorInputs], ProcessorInputs], inference_kwargs: Optional[Dict[str, int]], ) -> int: @@ -122,30 +120,27 @@ def _get_processed_max_dynamic_patch( assert "type" in processed_inputs assert processed_inputs["type"] == "token" assert "mm_processor_kwargs" in processed_inputs - return processed_inputs["mm_processor_kwargs"]["max_dynamic_patch"] + return processed_inputs["mm_processor_kwargs"]["num_crops"] -@pytest.mark.parametrize( - "init_max_dynamic_patch,inference_max_dynamic_patch", [ - (None, None), - (MAX_DYNAMIC_PATCH_OVERRIDE, None), - (DEFAULT_MAX_DYNAMIC_PATCH, MAX_DYNAMIC_PATCH_OVERRIDE), - ]) -def test_input_processor_kwargs(use_processor_mock, init_max_dynamic_patch, - inference_max_dynamic_patch): +@pytest.mark.parametrize("init_num_crops,inference_num_crops", [ + (None, None), + (NUM_CROPS_OVERRIDE, None), + (DEFAULT_NUM_CROPS, NUM_CROPS_OVERRIDE), +]) +def test_input_processor_kwargs(use_processor_mock, init_num_crops, + inference_num_crops): """Ensure input processors can use processor kwargs.""" dummy_registry = InputRegistry() - (init_kwargs, inference_kwargs, - expected_seq_count) = _get_max_dynamic_patch_info( - init_max_dynamic_patch, inference_max_dynamic_patch) + init_kwargs, inference_kwargs, expected_seq_count = _get_num_crops_info( + init_num_crops, inference_num_crops) ctx = build_model_context(DUMMY_MODEL_ID, mm_processor_kwargs=init_kwargs) processor = dummy_registry.create_input_processor(ctx.model_config) - max_dynamic_patch_val = _get_processed_max_dynamic_patch( - processor, inference_kwargs) + num_crops_val = _get_processed_num_crops(processor, inference_kwargs) - assert max_dynamic_patch_val == expected_seq_count + assert num_crops_val == expected_seq_count @pytest.mark.parametrize( @@ -170,21 +165,18 @@ def test_processor_with_sad_kwarg_overrides(use_processor_mock, processor = dummy_registry.create_input_processor(ctx.model_config) # Should filter out the inference time kwargs - max_dynamic_patch_val = _get_processed_max_dynamic_patch( - processor, mm_processor_kwargs) - assert max_dynamic_patch_val == DEFAULT_MAX_DYNAMIC_PATCH + num_crops_val = _get_processed_num_crops(processor, mm_processor_kwargs) + assert num_crops_val == DEFAULT_NUM_CROPS ### Test overrides for the dummy data -@pytest.mark.parametrize("max_dynamic_patch", - [None, MAX_DYNAMIC_PATCH_OVERRIDE]) -def test_dummy_data_kwarg_overrides(use_dummy_data_mock, max_dynamic_patch): +@pytest.mark.parametrize("num_crops", [None, NUM_CROPS_OVERRIDE]) +def test_dummy_data_kwarg_overrides(use_dummy_data_mock, num_crops): """Ensure dummy data factories can use processor kwargs.""" - mm_processor_kwargs = None if max_dynamic_patch is None else { - "max_dynamic_patch": max_dynamic_patch + mm_processor_kwargs = None if num_crops is None else { + "num_crops": num_crops } - expected_seq_count = (DEFAULT_MAX_DYNAMIC_PATCH - if max_dynamic_patch is None else max_dynamic_patch) + expected_seq_count = DEFAULT_NUM_CROPS if num_crops is None else num_crops dummy_registry = InputRegistry() ctx = build_model_context(DUMMY_MODEL_ID, mm_processor_kwargs=mm_processor_kwargs) @@ -225,20 +217,17 @@ def test_dummy_data_with_sad_kwarg_overrides(use_dummy_data_mock, # len is solely dependent on the value of the mm_processor_kwargs. 
dummy_data = dummy_registry.dummy_data_for_profiling( ctx.model_config, seq_len=-1, mm_registry=mm_registry) - assert len( - dummy_data.seq_data.prompt_token_ids) == DEFAULT_MAX_DYNAMIC_PATCH + assert len(dummy_data.seq_data.prompt_token_ids) == DEFAULT_NUM_CROPS ### Test overrides for the max token count per multimodal instance -@pytest.mark.parametrize("max_dynamic_patch", - [None, MAX_DYNAMIC_PATCH_OVERRIDE]) -def test_max_tokens_kwarg_overrides(max_dynamic_patch): +@pytest.mark.parametrize("num_crops", [None, NUM_CROPS_OVERRIDE]) +def test_max_tokens_kwarg_overrides(num_crops): """Ensure max token calcs can use processor kwargs.""" - mm_processor_kwargs = None if max_dynamic_patch is None else { - "max_dynamic_patch": max_dynamic_patch + mm_processor_kwargs = None if num_crops is None else { + "num_crops": num_crops } - expected_seq_count = (DEFAULT_MAX_DYNAMIC_PATCH - if max_dynamic_patch is None else max_dynamic_patch) + expected_seq_count = DEFAULT_NUM_CROPS if num_crops is None else num_crops ctx = build_model_context(MULTIMODAL_MODEL_ID, task="generate", @@ -250,11 +239,11 @@ def test_max_tokens_kwarg_overrides(max_dynamic_patch): mm_registry.init_mm_limits_per_prompt(ctx.model_config) # Patch the image registry for phi3v with our lambda that is compatible # with overrides, then ensure that calling the method correctly echos - # our max_dynamic_patch value back from the mm_processor_kwargs. + # our num_crops value back from the mm_processor_kwargs. with patch.object( mm_registry._get_plugin("image"), "_max_mm_tokens", - {mm_model_cls(): get_max_dynamic_patch}, + {mm_model_cls(): get_num_crops}, ): max_multimodal_tokens = mm_registry.get_max_multimodal_tokens( ctx.model_config) @@ -290,29 +279,26 @@ def test_max_tokens_with_sad_kwarg_overrides(mm_processor_kwargs): with patch.object( mm_registry._get_plugin("image"), "_max_mm_tokens", - {mm_model_cls(): get_max_dynamic_patch}, + {mm_model_cls(): get_num_crops}, ): max_multimodal_tokens = mm_registry.get_max_multimodal_tokens( ctx.model_config) - assert max_multimodal_tokens == DEFAULT_MAX_DYNAMIC_PATCH + assert max_multimodal_tokens == DEFAULT_NUM_CROPS ### Test overrides for the mapper -@pytest.mark.parametrize( - "max_dynamic_patch", - [DEFAULT_MAX_DYNAMIC_PATCH, MAX_DYNAMIC_PATCH_OVERRIDE]) -def test_default_mapper_with_processor_kwargs(image_assets, max_dynamic_patch): +@pytest.mark.parametrize("num_crops", [DEFAULT_NUM_CROPS, NUM_CROPS_OVERRIDE]) +def test_default_mapper_with_processor_kwargs(image_assets, num_crops): """Ensure that the mapper processor kwargs can fall back to HF models.""" # NOTE - we don't validate bad inputs for the default mapper, because it's # through the automodel interface in transformers, so we can't easily # inspect what kwargs are or are not allowed. - ctx = build_model_context( - MULTIMODAL_MODEL_ID, - task="generate", - trust_remote_code=True, - mm_processor_kwargs={"max_dynamic_patch": max_dynamic_patch}, - limit_mm_per_prompt={"image": 1}) + ctx = build_model_context(MULTIMODAL_MODEL_ID, + task="generate", + trust_remote_code=True, + mm_processor_kwargs={"num_crops": num_crops}, + limit_mm_per_prompt={"image": 1}) mm_registry = MultiModalRegistry() mm_registry.init_mm_limits_per_prompt(ctx.model_config) @@ -321,22 +307,20 @@ def test_default_mapper_with_processor_kwargs(image_assets, max_dynamic_patch): mm_inputs = {"image": image} mapped_inputs = mm_registry.map_input(ctx.model_config, mm_inputs) - # pixel vals should have shape: [batch, max_dynamic_patch+1, ...] 
- assert mapped_inputs["pixel_values"].shape[1] == max_dynamic_patch + 1 + # Phi3v pixel vals should have shape: [batch, num_crops+1, 3, 336, 336] + assert mapped_inputs["pixel_values"].shape[1] == num_crops + 1 -@pytest.mark.parametrize( - "init_max_dynamic_patch,inference_max_dynamic_patch", [ - (None, None), - (MAX_DYNAMIC_PATCH_OVERRIDE, None), - (DEFAULT_MAX_DYNAMIC_PATCH, MAX_DYNAMIC_PATCH_OVERRIDE), - ]) -def test_custom_mapper_kwarg_overrides(image_assets, init_max_dynamic_patch, - inference_max_dynamic_patch): +@pytest.mark.parametrize("init_num_crops,inference_num_crops", [ + (None, None), + (NUM_CROPS_OVERRIDE, None), + (DEFAULT_NUM_CROPS, NUM_CROPS_OVERRIDE), +]) +def test_custom_mapper_kwarg_overrides(image_assets, init_num_crops, + inference_num_crops): """Ensure custom mappers can use processor kwargs.""" - (init_kwargs, inference_kwargs, - expected_seq_count) = _get_max_dynamic_patch_info( - init_max_dynamic_patch, inference_max_dynamic_patch) + init_kwargs, inference_kwargs, expected_seq_count = _get_num_crops_info( + init_num_crops, inference_num_crops) ctx = build_model_context(MULTIMODAL_MODEL_ID, task="generate", @@ -351,7 +335,7 @@ def test_custom_mapper_kwarg_overrides(image_assets, init_max_dynamic_patch, # Patch the image registry for phi3v with our lambda that is compatible # with overrides, then ensure that calling the method correctly echos - # our max_dynamic_patch value back from the mm_processor_kwargs. + # our num_crops value back from the mm_processor_kwargs. mm_registry._get_plugin("image").register_input_mapper(custom_mapper)( mm_model_cls()) mapped_inputs = mm_registry.map_input(ctx.model_config, mm_inputs, @@ -389,12 +373,11 @@ def test_custom_mapper_with_sad_kwarg_overrides(image_assets, # Patch the image registry for phi3v with our lambda that is compatible # with overrides, then ensure that calling the method correctly echos - # our max_dynamic_patch value back from the mm_processor_kwargs. + # our num_crops value back from the mm_processor_kwargs. 
mm_registry._get_plugin("image").register_input_mapper(custom_mapper)( mm_model_cls()) # Should filter out the inference time kwargs mapped_inputs = mm_registry.map_input( ctx.model_config, mm_inputs, mm_processor_kwargs=mm_processor_kwargs) - assert mapped_inputs["pixel_values"].shape[1] == ( - DEFAULT_MAX_DYNAMIC_PATCH + 1) + assert mapped_inputs["pixel_values"].shape[1] == DEFAULT_NUM_CROPS + 1 diff --git a/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py b/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py index 2f4194a63fc25..3ebd7864b8fc8 100644 --- a/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py +++ b/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py @@ -2,15 +2,19 @@ import torch +from vllm.inputs import INPUT_REGISTRY from vllm.model_executor.models.llava import (LlavaForConditionalGeneration, - LlavaProcessor, - get_max_llava_image_tokens) + dummy_data_for_llava, + get_max_llava_image_tokens, + input_processor_for_llava) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY +@MULTIMODAL_REGISTRY.register_image_input_mapper() @MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_image_tokens) -@MULTIMODAL_REGISTRY.register_processor(LlavaProcessor) +@INPUT_REGISTRY.register_dummy_data(dummy_data_for_llava) +@INPUT_REGISTRY.register_input_processor(input_processor_for_llava) class MyLlava(LlavaForConditionalGeneration): def compute_logits( diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index f002a8ff905b1..1206424ae1e3f 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -265,13 +265,7 @@ def configure_post_pass(self): def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: - # when dynamo calls the backend, it means the bytecode - # transform and analysis are done compilation_counter.num_graphs_seen += 1 - from .monitor import torch_compile_start_time - dynamo_time = time.time() - torch_compile_start_time - logger.info("Dynamo bytecode transform time: %.2f s", dynamo_time) - self.compilation_configs.compilation_time += dynamo_time # we control the compilation process, each instance can only be # called once diff --git a/vllm/compilation/decorators.py b/vllm/compilation/decorators.py index 938430fe2a501..a32dced57e5b3 100644 --- a/vllm/compilation/decorators.py +++ b/vllm/compilation/decorators.py @@ -145,7 +145,6 @@ def _support_torch_compile( def __init__(self, *, vllm_config: VllmConfig, prefix: str = '', **kwargs): old_init(self, vllm_config=vllm_config, prefix=prefix, **kwargs) - self.vllm_config = vllm_config # for CompilationLevel.DYNAMO_AS_IS , the upper level model runner # will handle the compilation, so we don't need to do anything here. 
self.do_not_compile = \ @@ -158,6 +157,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = '', **kwargs): TorchCompileWrapperWithCustomDispatcher.__init__( self, compilation_level=vllm_config.compilation_config.level) + if vllm_config.compilation_config.level == CompilationLevel.PIECEWISE: + start_monitoring_torch_compile(vllm_config.compilation_config) + cls.__init__ = __init__ def __call__(self, *args, **kwargs): @@ -184,8 +186,6 @@ def __call__(self, *args, **kwargs): raise ValueError( "Unsupported dynamic dimensions" f" {dims} for argument {k} with type {type(arg)}.") - # here, it is the starting point of the `torch.compile` process - start_monitoring_torch_compile(self.vllm_config.compilation_config) # if we don't use custom dispatcher, we can directly call the # compiled function and let torch.compile handle the dispatching, diff --git a/vllm/compilation/monitor.py b/vllm/compilation/monitor.py index 3348674b09af2..f718e46423212 100644 --- a/vllm/compilation/monitor.py +++ b/vllm/compilation/monitor.py @@ -1,19 +1,14 @@ -import time - from vllm.config import CompilationConfig, CompilationLevel from vllm.logger import init_logger logger = init_logger(__name__) -torch_compile_start_time: float = 0.0 - def start_monitoring_torch_compile(compilation_config: CompilationConfig): - global torch_compile_start_time - torch_compile_start_time = time.time() + pass def end_monitoring_torch_compile(compilation_config: CompilationConfig): if compilation_config.level == CompilationLevel.PIECEWISE: - logger.info("torch.compile takes %.2f s in total", + logger.info("graph compilation takes %.2f s in total", compilation_config.compilation_time) diff --git a/vllm/config.py b/vllm/config.py index 4e5c755055f1f..d41f8752704ad 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1,4 +1,3 @@ -import ast import copy import enum import hashlib @@ -28,8 +27,7 @@ get_hf_text_config, get_pooling_config, get_sentence_transformer_tokenizer_config, is_encoder_decoder, uses_mrope) from vllm.utils import (GiB_bytes, cuda_device_count_stateless, get_cpu_memory, - print_warning_once, random_uuid, - resolve_obj_by_qualname) + print_warning_once, resolve_obj_by_qualname) if TYPE_CHECKING: from ray.util.placement_group import PlacementGroup @@ -513,10 +511,11 @@ def verify_async_output_proc(self, parallel_config, speculative_config, # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid - if not current_platform.is_async_output_supported(self.enforce_eager): + if device_config.device_type not in ("cuda", "tpu", "xpu", "hpu"): logger.warning( - "Async output processing is not supported on the " - "current platform type %s.", current_platform.device_type) + "Async output processing is only supported for CUDA, TPU, XPU " + "and HPU." + "Disabling it for other platforms.") self.use_async_output_proc = False return @@ -526,6 +525,16 @@ def verify_async_output_proc(self, parallel_config, speculative_config, self.use_async_output_proc = False return + # Reminder: Please update docs/source/usage/compatibility_matrix.rst + # If the feature combo become valid + if device_config.device_type == "cuda" and self.enforce_eager: + logger.warning( + "To see benefits of async output processing, enable CUDA " + "graph. 
Since, enforce-eager is enabled, async output " + "processor cannot be used") + self.use_async_output_proc = not self.enforce_eager + return + # Async postprocessor is not necessary with embedding mode # since there is no token generation if self.task == "embedding": @@ -2093,7 +2102,7 @@ class KVTransferConfig(BaseModel): @classmethod def from_cli(cls, cli_value: str) -> "KVTransferConfig": - """Parse the CLI value for the kv cache transfer config.""" + """Parse the CLI value for the compilation config.""" return KVTransferConfig.model_validate_json(cli_value) def model_post_init(self, __context: Any) -> None: @@ -2186,8 +2195,8 @@ class CompilationConfig(BaseModel): TODO: move outside cudagraph logic into compilation. torch.compile will handle cudagraph capture logic in the future. - cudagraph_capture_sizes: sizes to capture cudagraph. - - None (default): capture sizes are inferred from vllm config. - - List[int]: capture sizes are specified as given. + - None: capture sizes are inferred from compilation context. + - List[int]: capture sizes are specified. - cudagraph_num_of_warmups: number of warmup runs for cudagraph. It means the first several runs will be treated as warmup runs. Only after that, the execution will be recorded, and the recorded @@ -2201,10 +2210,14 @@ class CompilationConfig(BaseModel): - use_inductor: whether to use inductor compilation. - False: inductor compilation is not used. graph runs in eager. - True: inductor compilation is used. one graph for symbolic shape - is compiled. In addition, compile for cudagraph sizes that are - in candidate_compile_sizes, using configurations + is compiled. In addition, compile for different sizes specified + in inductor_compile_sizes, using configurations in inductor_compile_config. - - candidate_compile_sizes: sizes to compile for inductor. + - inductor_compile_sizes: sizes to compile for inductor. + - inductor_specialize_for_cudagraph_no_more_than: an optional integer + to specialize inductor for cudagraph sizes no more than the + specified size. It is useful when we want to specialize inductor + with a subset of cudagraph sizes. - inductor_compile_config: additional configurations for inductor. - None: use default configurations. - inductor_passes: additional passes for inductor. 
It is a dictionary @@ -2233,7 +2246,8 @@ class CompilationConfig(BaseModel): ]) use_inductor: bool = True - candidate_compile_sizes: Optional[List[int]] = Field(default=None) + inductor_specialize_for_cudagraph_no_more_than: Optional[int] = None + inductor_compile_sizes: Optional[List[int]] = Field(default=None) inductor_compile_config: Dict = Field(default_factory=dict) inductor_passes: Dict[str, str] = Field(default_factory=dict) @@ -2299,9 +2313,7 @@ def from_cli(cls, cli_value: str) -> "CompilationConfig": """Parse the CLI value for the compilation config.""" if cli_value in ["0", "1", "2", "3"]: return cls(level=int(cli_value)) - # do not use `eval`, it is dangerous and can execute arbitrary code - dict_value = ast.literal_eval(cli_value) - return CompilationConfig.model_validate(dict_value) + return CompilationConfig.model_validate_json(cli_value) def model_post_init(self, __context: Any) -> None: @@ -2362,20 +2374,18 @@ def init_with_cudagraph_sizes(self, sizes_to_specialize: List[int]): logger.info(("cudagraph sizes specified by model runner" " %s is overridden by config %s"), sizes_to_specialize, self.cudagraph_capture_sizes) - - if self.candidate_compile_sizes is None: - self.candidate_compile_sizes = [] - self.compile_sizes = [ - x for x in self.candidate_compile_sizes if x in self.capture_sizes - ] - ignored_sizes = [ - x for x in self.candidate_compile_sizes - if x not in self.capture_sizes - ] - if ignored_sizes: - logger.warning(("candidate_compile_sizes %s are ignored " - "because they are not cudagraph capture sizes."), - ignored_sizes) + if self.inductor_specialize_for_cudagraph_no_more_than is not None: + assert self.inductor_compile_sizes is None, ( + "inductor_compile_sizes should be None when " + "inductor_specialize_for_cudagraph_no_more_than is not None") + self.compile_sizes = [ + x for x in self.capture_sizes + if x <= self.inductor_specialize_for_cudagraph_no_more_than + ] + else: + if self.inductor_compile_sizes is None: + self.inductor_compile_sizes = [] + self.compile_sizes = self.inductor_compile_sizes # sort to make sure cudagraph capture sizes are in descending order self.capture_sizes.sort(reverse=True) @@ -2418,7 +2428,6 @@ class VllmConfig: init=True) # type: ignore kv_transfer_config: KVTransferConfig = field(default=None, init=True) # type: ignore - instance_id: str = "" @staticmethod def get_graph_batch_size(batch_size: int) -> int: @@ -2483,15 +2492,7 @@ def _get_quantization_config( return quant_config return None - def with_hf_config( - self, - hf_config: PretrainedConfig, - architectures: Optional[list[str]] = None, - ) -> "VllmConfig": - if architectures is not None: - hf_config = copy.deepcopy(hf_config) - hf_config.architectures = architectures - + def with_hf_config(self, hf_config: PretrainedConfig) -> "VllmConfig": model_config = copy.deepcopy(self.model_config) model_config.hf_config = hf_config @@ -2542,7 +2543,6 @@ def __post_init__(self): self.compilation_config.custom_ops = ["none"] self.compilation_config.use_cudagraph = True self.compilation_config.use_inductor = True - self.compilation_config.cudagraph_num_of_warmups = 1 self.compilation_config.pass_config.enable_fusion = False self.compilation_config.pass_config.enable_reshape = False self.compilation_config.level = CompilationLevel.PIECEWISE @@ -2585,44 +2585,46 @@ def __post_init__(self): current_platform.check_and_update_config(self) - if not self.instance_id: - self.instance_id = random_uuid()[:5] - def __str__(self): - return ( - f"model={self.model_config.model!r}," - f" 
speculative_config={self.speculative_config!r}," - f" tokenizer={self.model_config.tokenizer!r}, " - f"skip_tokenizer_init={self.model_config.skip_tokenizer_init}," - f" tokenizer_mode={self.model_config.tokenizer_mode}, " - f"revision={self.model_config.revision}, " - f"override_neuron_config={self.model_config.override_neuron_config}," - f" tokenizer_revision={self.model_config.tokenizer_revision}, " - f"trust_remote_code={self.model_config.trust_remote_code}, " - f"dtype={self.model_config.dtype}, " - f"max_seq_len={self.model_config.max_model_len}," - f" download_dir={self.load_config.download_dir!r}, " - f"load_format={self.load_config.load_format}, " - f"tensor_parallel_size={self.parallel_config.tensor_parallel_size}," - f" pipeline_parallel_size={self.parallel_config.pipeline_parallel_size}, " # noqa - f"disable_custom_all_reduce={self.parallel_config.disable_custom_all_reduce}, " # noqa - f"quantization={self.model_config.quantization}, " - f"enforce_eager={self.model_config.enforce_eager}, " - f"kv_cache_dtype={self.cache_config.cache_dtype}, " - f"quantization_param_path={self.model_config.quantization_param_path}," - f" device_config={self.device_config.device}, " - f"decoding_config={self.decoding_config!r}, " - f"observability_config={self.observability_config!r}, " - f"seed={self.model_config.seed}, " - f"served_model_name={self.model_config.served_model_name}, " - f"num_scheduler_steps={self.scheduler_config.num_scheduler_steps}, " - f"multi_step_stream_outputs={self.scheduler_config.multi_step_stream_outputs}, " # noqa - f"enable_prefix_caching={self.cache_config.enable_prefix_caching}, " - f"chunked_prefill_enabled={self.scheduler_config.chunked_prefill_enabled}, " # noqa - f"use_async_output_proc={self.model_config.use_async_output_proc}, " - f"mm_processor_kwargs={self.model_config.mm_processor_kwargs}, " - f"pooler_config={self.model_config.pooler_config!r}," - f" compilation_config={self.compilation_config!r}") + return ("model=%r, speculative_config=%r, tokenizer=%r, " + "skip_tokenizer_init=%s, tokenizer_mode=%s, revision=%s, " + "override_neuron_config=%s, tokenizer_revision=%s, " + "trust_remote_code=%s, dtype=%s, max_seq_len=%d, " + "download_dir=%r, load_format=%s, tensor_parallel_size=%d, " + "pipeline_parallel_size=%d, " + "disable_custom_all_reduce=%s, quantization=%s, " + "enforce_eager=%s, kv_cache_dtype=%s, " + "quantization_param_path=%s, device_config=%s, " + "decoding_config=%r, observability_config=%r, " + "seed=%d, served_model_name=%s, " + "num_scheduler_steps=%d, enable_prefix_caching=%s, " + "use_async_output_proc=%s, mm_processor_kwargs=%s") % \ + (self.model_config.model, self.speculative_config, + self.model_config.tokenizer, + self.model_config.skip_tokenizer_init, + self.model_config.tokenizer_mode, + self.model_config.revision, + self.model_config.override_neuron_config, + self.model_config.tokenizer_revision, + self.model_config.trust_remote_code, + self.model_config.dtype, + self.model_config.max_model_len, + self.load_config.download_dir, + self.load_config.load_format, + self.parallel_config.tensor_parallel_size, + self.parallel_config.pipeline_parallel_size, + self.parallel_config.disable_custom_all_reduce, + self.model_config.quantization, + self.model_config.enforce_eager, + self.cache_config.cache_dtype, + self.model_config.quantization_param_path, + self.device_config.device, self.decoding_config, + self.observability_config, self.model_config.seed, + self.model_config.served_model_name, + self.scheduler_config.num_scheduler_steps, + 
self.cache_config.enable_prefix_caching, + self.model_config.use_async_output_proc, + self.model_config.mm_processor_kwargs) _current_vllm_config: Optional[VllmConfig] = None diff --git a/vllm/distributed/device_communicators/shm_broadcast.py b/vllm/distributed/device_communicators/shm_broadcast.py index 9a2d8918d96e5..2ff1a1ead99c1 100644 --- a/vllm/distributed/device_communicators/shm_broadcast.py +++ b/vllm/distributed/device_communicators/shm_broadcast.py @@ -1,11 +1,10 @@ import os import pickle -import sys import time from contextlib import contextmanager from dataclasses import dataclass, field from multiprocessing import shared_memory -from typing import List, Optional, Tuple +from typing import List, Optional from unittest.mock import patch import torch @@ -22,20 +21,6 @@ logger = init_logger(__name__) -# We prefer to use os.sched_yield as it results in tighter polling loops, -# measured to be around 3e-7 seconds. However on earlier versions of Python -# os.sched_yield() does not release the GIL, so we fall back to time.sleep(0) -USE_SCHED_YIELD = ((sys.version_info[:3] >= (3, 11, 1)) - or (sys.version_info[:2] == (3, 10) - and sys.version_info[2] >= 8)) - - -def sched_yield(): - if USE_SCHED_YIELD: - os.sched_yield() - else: - time.sleep(0) - class ShmRingBuffer: @@ -129,14 +114,11 @@ def __init__(self, # and we should suppress the error pass - def handle(self): - return (self.n_reader, self.max_chunk_bytes, self.max_chunks, - self.shared_memory.name) - def __reduce__(self): return ( self.__class__, - self.handle(), + (self.n_reader, self.max_chunk_bytes, self.max_chunks, + self.shared_memory.name), ) def __del__(self): @@ -165,7 +147,7 @@ class Handle: connect_ip: str local_reader_ranks: List[int] = field(default_factory=list) - buffer_handle: Optional[Tuple[int, int, int, str]] = None + buffer: Optional[ShmRingBuffer] = None local_subscribe_port: Optional[int] = None remote_subscribe_port: Optional[int] = None @@ -246,7 +228,7 @@ def __init__( self.handle = Handle( connect_ip=connect_ip, local_reader_ranks=local_reader_ranks, - buffer_handle=self.buffer.handle(), + buffer=self.buffer, local_subscribe_port=local_subscribe_port, remote_subscribe_port=remote_subscribe_port, ) @@ -265,8 +247,8 @@ def create_from_handle(handle: Handle, rank) -> "MessageQueue": context = Context() if rank in handle.local_reader_ranks: - assert handle.buffer_handle is not None - self.buffer = ShmRingBuffer(*handle.buffer_handle) + assert handle.buffer is not None + self.buffer = handle.buffer self.current_idx = 0 self.local_reader_rank = handle.local_reader_ranks.index(rank) self._is_local_reader = True @@ -332,7 +314,7 @@ def wait_until_ready(self): assert recv == b"READY" @contextmanager - def acquire_write(self, timeout: Optional[float] = None): + def acquire_write(self): assert self._is_writer, "Only writers can acquire write" start_time = time.monotonic() n_warning = 1 @@ -347,20 +329,16 @@ def acquire_write(self, timeout: Optional[float] = None): # we need to wait until it is read by all readers # Release the processor to other threads - sched_yield() + os.sched_yield() - # if we wait for a long time, log a message + # if we wait for a long time, we should warn the user if (time.monotonic() - start_time > VLLM_RINGBUFFER_WARNING_INTERVAL * n_warning): - logger.debug("No available block found in %s second. ", - VLLM_RINGBUFFER_WARNING_INTERVAL) + logger.warning( + "No available block found in %s second. 
", + VLLM_RINGBUFFER_WARNING_INTERVAL) n_warning += 1 - # if we time out, raise an exception - if (timeout is not None - and time.monotonic() - start_time > timeout): - raise TimeoutError - continue # found a block that is either # (1) not written @@ -387,7 +365,7 @@ def acquire_write(self, timeout: Optional[float] = None): break @contextmanager - def acquire_read(self, timeout: Optional[float] = None): + def acquire_read(self): assert self._is_local_reader, "Only readers can acquire read" start_time = time.monotonic() n_warning = 1 @@ -405,20 +383,16 @@ def acquire_read(self, timeout: Optional[float] = None): # we need to wait until it is written # Release the processor to other threads - sched_yield() + os.sched_yield() - # if we wait for a long time, log a message + # if we wait for a long time, we should warn the user if (time.monotonic() - start_time > VLLM_RINGBUFFER_WARNING_INTERVAL * n_warning): - logger.debug("No available block found in %s second. ", - VLLM_RINGBUFFER_WARNING_INTERVAL) + logger.warning( + "No available block found in %s second. ", + VLLM_RINGBUFFER_WARNING_INTERVAL) n_warning += 1 - # if we time out, raise an exception - if (timeout is not None - and time.monotonic() - start_time > timeout): - raise TimeoutError - continue # found a block that is not read by this reader # let caller read from the buffer @@ -432,26 +406,24 @@ def acquire_read(self, timeout: Optional[float] = None): 1) % self.buffer.max_chunks break - def enqueue(self, obj, timeout: Optional[float] = None): - """ Write to message queue with optional timeout (in seconds) """ + def enqueue(self, obj): assert self._is_writer, "Only writers can enqueue" serialized_obj = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL) if self.n_local_reader > 0: if len(serialized_obj) >= self.buffer.max_chunk_bytes: - with self.acquire_write(timeout) as buf: + with self.acquire_write() as buf: buf[0] = 1 # overflow self.local_socket.send(serialized_obj) else: - with self.acquire_write(timeout) as buf: + with self.acquire_write() as buf: buf[0] = 0 # not overflow buf[1:len(serialized_obj) + 1] = serialized_obj if self.n_remote_reader > 0: self.remote_socket.send(serialized_obj) - def dequeue(self, timeout: Optional[float] = None): - """ Read from message queue with optional timeout (in seconds) """ + def dequeue(self): if self._is_local_reader: - with self.acquire_read(timeout) as buf: + with self.acquire_read() as buf: overflow = buf[0] == 1 if not overflow: # no need to know the size of serialized object diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 9f932c6f26eaa..8fd96aad25357 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -212,9 +212,12 @@ def __post_init__(self): # support `EngineArgs(compilation_config={...})` # without having to manually construct a # CompilationConfig object - if isinstance(self.compilation_config, (int, dict)): + if isinstance(self.compilation_config, (int)): self.compilation_config = CompilationConfig.from_cli( str(self.compilation_config)) + elif isinstance(self.compilation_config, (dict)): + self.compilation_config = CompilationConfig.from_cli( + json.dumps(self.compilation_config)) # Setup plugins from vllm.plugins import load_general_plugins @@ -440,7 +443,6 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'capping to sliding window size') parser.add_argument('--use-v2-block-manager', action='store_true', - default=True, help='[DEPRECATED] block manager v1 has been ' 'removed and SelfAttnBlockSpaceManager 
(i.e. ' 'block manager v2) is now the default. ' @@ -1077,12 +1079,9 @@ def create_engine_config(self, # long context (> 32K) models. This is to avoid OOM errors in the # initial memory profiling phase. - # For multimodal models, chunked prefill is disabled by default in - # V0, but enabled by design in V1 - if model_config.is_multimodal_model: - self.enable_chunked_prefill = bool(envs.VLLM_USE_V1) - - elif use_long_context: + # Chunked prefill is currently disabled for multimodal models by + # default. + if use_long_context and not model_config.is_multimodal_model: is_gpu = device_config.device_type == "cuda" use_sliding_window = (model_config.get_sliding_window() is not None) @@ -1275,9 +1274,12 @@ def _override_v1_engine_config(self, engine_config: VllmConfig) -> None: Override the EngineConfig's configs based on the usage context for V1. """ assert envs.VLLM_USE_V1, "V1 is not enabled" + # TODO (ywang96): Enable APC by default when VLM supports it. if engine_config.model_config.is_multimodal_model: - # TODO (ywang96): Enable APC by default when VLM supports it. - assert not engine_config.cache_config.enable_prefix_caching + logger.warning( + "Prefix caching is currently not supported for multimodal " + "models and has been disabled.") + engine_config.cache_config.enable_prefix_caching = False @dataclass diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 34044b358faca..36dc41955d887 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -233,7 +233,6 @@ def __init__( use_cached_outputs: bool = False, ) -> None: - self.vllm_config = vllm_config self.model_config = vllm_config.model_config self.cache_config = vllm_config.cache_config self.lora_config = vllm_config.lora_config @@ -249,12 +248,61 @@ def __init__( ) logger.info( - "Initializing an LLM engine (v%s) with config: %r," - "use_cached_outputs=%s, ", + "Initializing an LLM engine (v%s) with config: " + "model=%r, speculative_config=%r, tokenizer=%r, " + "skip_tokenizer_init=%s, tokenizer_mode=%s, revision=%s, " + "override_neuron_config=%s, tokenizer_revision=%s, " + "trust_remote_code=%s, dtype=%s, max_seq_len=%d, " + "download_dir=%r, load_format=%s, tensor_parallel_size=%d, " + "pipeline_parallel_size=%d, " + "disable_custom_all_reduce=%s, quantization=%s, " + "weights_load_device=%s, enforce_eager=%s, kv_cache_dtype=%s, " + "quantization_param_path=%s, device_config=%s, " + "decoding_config=%r, observability_config=%r, " + "seed=%d, served_model_name=%s, " + "num_scheduler_steps=%d, chunked_prefill_enabled=%s " + "multi_step_stream_outputs=%s, enable_prefix_caching=%s, " + "use_async_output_proc=%s, use_cached_outputs=%s, " + "mm_processor_kwargs=%s, pooler_config=%r," + "compilation_config=%r", VLLM_VERSION, - vllm_config, + self.model_config.model, + self.speculative_config, + self.model_config.tokenizer, + self.model_config.skip_tokenizer_init, + self.model_config.tokenizer_mode, + self.model_config.revision, + self.model_config.override_neuron_config, + self.model_config.tokenizer_revision, + self.model_config.trust_remote_code, + self.model_config.dtype, + self.model_config.max_model_len, + self.load_config.download_dir, + self.load_config.load_format, + self.parallel_config.tensor_parallel_size, + self.parallel_config.pipeline_parallel_size, + self.parallel_config.disable_custom_all_reduce, + self.model_config.quantization, + self.load_config.device, + self.model_config.enforce_eager, + self.cache_config.cache_dtype, + self.model_config.quantization_param_path, + 
self.device_config.device, + self.decoding_config, + self.observability_config, + self.model_config.seed, + self.model_config.served_model_name, + self.scheduler_config.num_scheduler_steps, + self.scheduler_config.chunked_prefill_enabled, + self.scheduler_config.multi_step_stream_outputs, + self.cache_config.enable_prefix_caching, + self.model_config.use_async_output_proc, use_cached_outputs, + self.model_config.mm_processor_kwargs, + self.model_config.pooler_config, + vllm_config.compilation_config, ) + # TODO(woosuk): Print more configs in debug mode. self.log_stats = log_stats self.use_cached_outputs = use_cached_outputs @@ -387,14 +435,13 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: self.stat_loggers = { "logging": LoggingStatLogger( - local_interval=_LOCAL_LOGGING_INTERVAL_SEC, - vllm_config=vllm_config), + local_interval=_LOCAL_LOGGING_INTERVAL_SEC), "prometheus": PrometheusStatLogger( local_interval=_LOCAL_LOGGING_INTERVAL_SEC, labels=dict( model_name=self.model_config.served_model_name), - vllm_config=vllm_config), + max_model_len=self.model_config.max_model_len), } self.stat_loggers["prometheus"].info("cache_config", self.cache_config) @@ -684,10 +731,12 @@ def stop_remote_worker_execution_loop(self) -> None: self.model_executor.stop_remote_worker_execution_loop() @overload + @deprecated("'inputs' will be renamed to 'prompt") def add_request( self, request_id: str, - prompt: PromptType, + *, + inputs: PromptType, params: Union[SamplingParams, PoolingParams], arrival_time: Optional[float] = None, lora_request: Optional[LoRARequest] = None, @@ -698,12 +747,10 @@ def add_request( ... @overload - @deprecated("'inputs' will be renamed to 'prompt") def add_request( self, request_id: str, - *, - inputs: PromptType, + prompt: PromptType, params: Union[SamplingParams, PoolingParams], arrival_time: Optional[float] = None, lora_request: Optional[LoRARequest] = None, diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py index c8aec8dd3afa3..a5ae21c3966a7 100644 --- a/vllm/engine/metrics.py +++ b/vllm/engine/metrics.py @@ -6,7 +6,6 @@ import numpy as np import prometheus_client -from vllm.config import VllmConfig from vllm.engine.metrics_types import (StatLoggerBase, Stats, SupportsMetricsInfo) from vllm.executor.ray_utils import ray @@ -45,12 +44,10 @@ class Metrics: _counter_cls = prometheus_client.Counter _histogram_cls = prometheus_client.Histogram - def __init__(self, labelnames: List[str], vllm_config: VllmConfig): + def __init__(self, labelnames: List[str], max_model_len: int): # Unregister any existing vLLM collectors (for CI/CD) self._unregister_vllm_metrics() - max_model_len = vllm_config.model_config.max_model_len - # System stats # Scheduler State self.gauge_scheduler_running = self._gauge_cls( @@ -118,15 +115,11 @@ def __init__(self, labelnames: List[str], vllm_config: VllmConfig): name="vllm:tokens_total", documentation="Number of prefill plus generation tokens processed.", labelnames=labelnames) - buckets = [1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8096] - if not vllm_config.model_config.enforce_eager: - buckets = vllm_config.compilation_config.capture_sizes.copy() - buckets.sort() self.histogram_iteration_tokens = self._histogram_cls( name="vllm:iteration_tokens_total", documentation="Histogram of number of tokens per engine_step.", labelnames=labelnames, - buckets=buckets) + buckets=[1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8096]) self.histogram_time_to_first_token = self._histogram_cls( name="vllm:time_to_first_token_seconds", 
documentation="Histogram of time to first token in seconds.", @@ -368,10 +361,10 @@ class RayMetrics(Metrics): _histogram_cls: Type[prometheus_client.Histogram] = cast( Type[prometheus_client.Histogram], _RayHistogramWrapper) - def __init__(self, labelnames: List[str], vllm_config: VllmConfig): + def __init__(self, labelnames: List[str], max_model_len: int): if ray_metrics is None: raise ImportError("RayMetrics requires Ray to be installed.") - super().__init__(labelnames, vllm_config) + super().__init__(labelnames, max_model_len) def _unregister_vllm_metrics(self) -> None: # No-op on purpose @@ -428,8 +421,8 @@ def get_throughput(tracked_stats: List[int], now: float, class LoggingStatLogger(StatLoggerBase): """LoggingStatLogger is used in LLMEngine to log to Stdout.""" - def __init__(self, local_interval: float, vllm_config: VllmConfig) -> None: - super().__init__(local_interval, vllm_config) + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) self.last_prompt_throughput: Optional[float] = None self.last_generation_throughput: Optional[float] = None @@ -522,12 +515,12 @@ class PrometheusStatLogger(StatLoggerBase): _gauge_cls = prometheus_client.Gauge def __init__(self, local_interval: float, labels: Dict[str, str], - vllm_config: VllmConfig) -> None: - super().__init__(local_interval, vllm_config) + max_model_len: int) -> None: + super().__init__(local_interval) # Prometheus metrics self.labels = labels self.metrics = self._metrics_cls(labelnames=list(labels.keys()), - vllm_config=vllm_config) + max_model_len=max_model_len) def _log_gauge(self, gauge, data: Union[int, float]) -> None: # Convenience function for logging to gauge. diff --git a/vllm/engine/metrics_types.py b/vllm/engine/metrics_types.py index 5c7a430d11c5a..5f7ec3bbcb269 100644 --- a/vllm/engine/metrics_types.py +++ b/vllm/engine/metrics_types.py @@ -16,7 +16,6 @@ from dataclasses import dataclass from typing import Dict, List, Optional, Protocol -from vllm.config import VllmConfig from vllm.spec_decode.metrics import SpecDecodeWorkerMetrics @@ -78,7 +77,7 @@ def metrics_info(self) -> Dict[str, str]: class StatLoggerBase(ABC): """Base class for StatLogger.""" - def __init__(self, local_interval: float, vllm_config: VllmConfig) -> None: + def __init__(self, local_interval: float) -> None: # Tracked stats over current local logging interval. self.num_prompt_tokens: List[int] = [] self.num_generation_tokens: List[int] = [] diff --git a/vllm/engine/multiprocessing/__init__.py b/vllm/engine/multiprocessing/__init__.py index 420f540d0b5f4..7020012e8bb86 100644 --- a/vllm/engine/multiprocessing/__init__.py +++ b/vllm/engine/multiprocessing/__init__.py @@ -35,9 +35,11 @@ class RPCProcessRequest: priority: int = 0 @overload + @deprecated("'inputs' will be renamed to 'prompt") def __init__( self, - prompt: PromptType, + *, + inputs: PromptType, params: Union[SamplingParams, PoolingParams], request_id: str, lora_request: Optional[LoRARequest] = None, @@ -48,11 +50,9 @@ def __init__( ... 
@overload - @deprecated("'inputs' will be renamed to 'prompt") def __init__( self, - *, - inputs: PromptType, + prompt: PromptType, params: Union[SamplingParams, PoolingParams], request_id: str, lora_request: Optional[LoRARequest] = None, diff --git a/vllm/engine/multiprocessing/client.py b/vllm/engine/multiprocessing/client.py index 32bd83305bb8f..7e4f81b2cf8e2 100644 --- a/vllm/engine/multiprocessing/client.py +++ b/vllm/engine/multiprocessing/client.py @@ -415,9 +415,11 @@ def dead_error(self) -> BaseException: return ENGINE_DEAD_ERROR(self._errored_with) @overload + @deprecated("'inputs' will be renamed to 'prompt") def generate( self, - prompt: PromptType, + *, + inputs: PromptType, sampling_params: SamplingParams, request_id: str, lora_request: Optional[LoRARequest] = None, @@ -428,11 +430,9 @@ def generate( ... @overload - @deprecated("'inputs' will be renamed to 'prompt") def generate( self, - *, - inputs: PromptType, + prompt: PromptType, sampling_params: SamplingParams, request_id: str, lora_request: Optional[LoRARequest] = None, @@ -487,9 +487,11 @@ def generate( prompt_adapter_request, priority) @overload + @deprecated("'inputs' will be renamed to 'prompt") def encode( self, - prompt: PromptType, + *, + inputs: PromptType, pooling_params: PoolingParams, request_id: str, lora_request: Optional[LoRARequest] = None, @@ -499,11 +501,9 @@ def encode( ... @overload - @deprecated("'inputs' will be renamed to 'prompt") def encode( self, - *, - inputs: PromptType, + prompt: PromptType, pooling_params: PoolingParams, request_id: str, lora_request: Optional[LoRARequest] = None, diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 2a02187223a33..65fa9873df28c 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -1,4 +1,5 @@ import itertools +import json import warnings from contextlib import contextmanager from typing import (Any, ClassVar, Dict, List, Optional, Sequence, Tuple, Type, @@ -185,9 +186,12 @@ def __init__( kwargs["disable_log_stats"] = True if compilation_config is not None: - if isinstance(compilation_config, (int, dict)): + if isinstance(compilation_config, (int)): compilation_config_instance = CompilationConfig.from_cli( str(compilation_config)) + elif isinstance(compilation_config, (dict)): + compilation_config_instance = CompilationConfig.from_cli( + json.dumps(compilation_config)) else: compilation_config_instance = compilation_config else: @@ -252,21 +256,8 @@ def set_tokenizer(self, tokenizer: AnyTokenizer) -> None: else: tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer) - @overload - def generate( - self, - prompts: Union[PromptType, Sequence[PromptType]], - /, - *, - sampling_params: Optional[Union[SamplingParams, - Sequence[SamplingParams]]] = None, - use_tqdm: bool = True, - lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[RequestOutput]: - ... - @overload # LEGACY: single (prompt + optional token ids) - @deprecated("'prompt_token_ids' will become part of 'prompts'") + @deprecated("'prompt_token_ids' will become part of 'prompts") def generate( self, prompts: str, @@ -279,7 +270,7 @@ def generate( ... @overload # LEGACY: multi (prompt + optional token ids) - @deprecated("'prompt_token_ids' will become part of 'prompts'") + @deprecated("'prompt_token_ids' will become part of 'prompts") def generate( self, prompts: List[str], @@ -292,7 +283,7 @@ def generate( ... 
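The overload reshuffling above (and the matching hunks that follow) restores the arrangement in which the keyword-only inputs overload carries the deprecation marker while the positional prompt overload is the plain one. A minimal self-contained sketch of that pattern, with illustrative names rather than vLLM's real signatures, and with deprecated assumed to come from typing_extensions:

from typing import Optional, overload

from typing_extensions import deprecated


class Example:

    @overload
    @deprecated("'inputs' will be renamed to 'prompt'")
    def encode(self, *, inputs: str, request_id: str) -> str:
        ...

    @overload
    def encode(self, prompt: str, request_id: str) -> str:
        ...

    def encode(self,
               prompt: Optional[str] = None,
               *,
               inputs: Optional[str] = None,
               request_id: str = "") -> str:
        # The runtime implementation accepts either spelling; static type
        # checkers emit a warning only for the deprecated keyword-only
        # 'inputs' form.
        if prompt is None:
            prompt = inputs
        return f"{request_id}: {prompt}"


print(Example().encode("Hello", request_id="req-0"))

Only the final, undecorated definition exists at runtime; the decorated stubs are seen purely by type checkers, which is why swapping which stub carries the deprecation marker changes no runtime behavior.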
@overload # LEGACY: single (token ids + optional prompt) - @deprecated("'prompt_token_ids' will become part of 'prompts'") + @deprecated("'prompt_token_ids' will become part of 'prompts") def generate( self, prompts: Optional[str] = None, @@ -306,7 +297,7 @@ def generate( ... @overload # LEGACY: multi (token ids + optional prompt) - @deprecated("'prompt_token_ids' will become part of 'prompts'") + @deprecated("'prompt_token_ids' will become part of 'prompts") def generate( self, prompts: Optional[List[str]] = None, @@ -320,7 +311,7 @@ def generate( ... @overload # LEGACY: single or multi token ids [pos-only] - @deprecated("'prompt_token_ids' will become part of 'prompts'") + @deprecated("'prompt_token_ids' will become part of 'prompts") def generate( self, prompts: None, @@ -331,6 +322,19 @@ def generate( ) -> List[RequestOutput]: ... + @overload + def generate( + self, + prompts: Union[PromptType, Sequence[PromptType]], + /, + *, + sampling_params: Optional[Union[SamplingParams, + Sequence[SamplingParams]]] = None, + use_tqdm: bool = True, + lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + ) -> List[RequestOutput]: + ... + @deprecate_kwargs( "prompt_token_ids", is_deprecated=lambda: LLM.DEPRECATE_LEGACY, @@ -672,21 +676,8 @@ def chat( lora_request=lora_request, ) - @overload - def encode( - self, - prompts: Union[PromptType, Sequence[PromptType]], - /, - *, - pooling_params: Optional[Union[PoolingParams, - Sequence[PoolingParams]]] = None, - use_tqdm: bool = True, - lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[PoolingRequestOutput]: - ... - @overload # LEGACY: single (prompt + optional token ids) - @deprecated("'prompt_token_ids' will become part of 'prompts'") + @deprecated("'prompt_token_ids' will become part of 'prompts") def encode( self, prompts: str, @@ -699,7 +690,7 @@ def encode( ... @overload # LEGACY: multi (prompt + optional token ids) - @deprecated("'prompt_token_ids' will become part of 'prompts'") + @deprecated("'prompt_token_ids' will become part of 'prompts") def encode( self, prompts: List[str], @@ -712,7 +703,7 @@ def encode( ... @overload # LEGACY: single (token ids + optional prompt) - @deprecated("'prompt_token_ids' will become part of 'prompts'") + @deprecated("'prompt_token_ids' will become part of 'prompts") def encode( self, prompts: Optional[str] = None, @@ -726,7 +717,7 @@ def encode( ... @overload # LEGACY: multi (token ids + optional prompt) - @deprecated("'prompt_token_ids' will become part of 'prompts'") + @deprecated("'prompt_token_ids' will become part of 'prompts") def encode( self, prompts: Optional[List[str]] = None, @@ -740,7 +731,7 @@ def encode( ... @overload # LEGACY: single or multi token ids [pos-only] - @deprecated("'prompt_token_ids' will become part of 'prompts'") + @deprecated("'prompt_token_ids' will become part of 'prompts") def encode( self, prompts: None, @@ -751,6 +742,19 @@ def encode( ) -> List[PoolingRequestOutput]: ... + @overload + def encode( + self, + prompts: Union[PromptType, Sequence[PromptType]], + /, + *, + pooling_params: Optional[Union[PoolingParams, + Sequence[PoolingParams]]] = None, + use_tqdm: bool = True, + lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + ) -> List[PoolingRequestOutput]: + ... 
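These legacy overload stubs are backed by the @deprecate_kwargs decorator applied just below, which warns only when the gate callable (here lambda: LLM.DEPRECATE_LEGACY) is true at call time. A rough sketch of that style of decorator, using names of my own choosing rather than the real vllm.utils.deprecate_kwargs:

import functools
import warnings
from typing import Callable


def deprecate_kwargs(*names: str,
                     is_deprecated: Callable[[], bool] = lambda: True):
    """Warn when any of `names` is passed as a keyword and the gate is on."""

    def wrapper(fn):

        @functools.wraps(fn)
        def inner(*args, **kwargs):
            if is_deprecated():
                hit = [name for name in names if kwargs.get(name) is not None]
                if hit:
                    warnings.warn(f"{', '.join(hit)} is deprecated",
                                  DeprecationWarning,
                                  stacklevel=2)
            return fn(*args, **kwargs)

        return inner

    return wrapper

Applied as in the hunk below, such a decorator leaves existing prompt_token_ids callers working while surfacing a warning once the class-level flag is enabled.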
+ @deprecate_kwargs( "prompt_token_ids", is_deprecated=lambda: LLM.DEPRECATE_LEGACY, diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 0f93eb54111ad..6bc31ef83ded4 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -175,8 +175,8 @@ async def build_async_engine_client_from_engine_args( # Select random path for IPC. ipc_path = get_open_zmq_ipc_path() - logger.debug("Multiprocessing frontend to use %s for IPC Path.", - ipc_path) + logger.info("Multiprocessing frontend to use %s for IPC Path.", + ipc_path) # Start RPCServer in separate process (holds the LLMEngine). # the current process might have CUDA context, @@ -249,8 +249,8 @@ def mount_metrics(app: FastAPI): prometheus_multiproc_dir_path = os.getenv("PROMETHEUS_MULTIPROC_DIR", None) if prometheus_multiproc_dir_path is not None: - logger.debug("vLLM to use %s as PROMETHEUS_MULTIPROC_DIR", - prometheus_multiproc_dir_path) + logger.info("vLLM to use %s as PROMETHEUS_MULTIPROC_DIR", + prometheus_multiproc_dir_path) registry = CollectorRegistry() multiprocess.MultiProcessCollector(registry) @@ -305,7 +305,7 @@ async def health(raw_request: Request) -> Response: async def tokenize(request: TokenizeRequest, raw_request: Request): handler = tokenization(raw_request) - generator = await handler.create_tokenize(request, raw_request) + generator = await handler.create_tokenize(request) if isinstance(generator, ErrorResponse): return JSONResponse(content=generator.model_dump(), status_code=generator.code) @@ -319,7 +319,7 @@ async def tokenize(request: TokenizeRequest, raw_request: Request): async def detokenize(request: DetokenizeRequest, raw_request: Request): handler = tokenization(raw_request) - generator = await handler.create_detokenize(request, raw_request) + generator = await handler.create_detokenize(request) if isinstance(generator, ErrorResponse): return JSONResponse(content=generator.model_dump(), status_code=generator.code) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 0af7613a473a4..54ca0463bcab1 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -176,8 +176,7 @@ async def create_chat_completion( logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(str(e)) - request_id = "chatcmpl-" \ - f"{self._base_request_id(raw_request, request.request_id)}" + request_id = f"chatcmpl-{request.request_id}" request_metadata = RequestResponseMetadata(request_id=request_id) if raw_request: diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index c54d5f07cf58c..fc1c4908d6650 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -30,7 +30,7 @@ from vllm.sampling_params import BeamSearchParams, SamplingParams from vllm.sequence import Logprob from vllm.transformers_utils.tokenizer import AnyTokenizer -from vllm.utils import merge_async_iterators +from vllm.utils import merge_async_iterators, random_uuid logger = init_logger(__name__) @@ -86,7 +86,7 @@ async def create_completion( "suffix is not currently supported") model_name = self.base_model_paths[0].name - request_id = f"cmpl-{self._base_request_id(raw_request)}" + request_id = f"cmpl-{random_uuid()}" created_time = int(time.time()) request_metadata = RequestResponseMetadata(request_id=request_id) diff --git a/vllm/entrypoints/openai/serving_embedding.py 
b/vllm/entrypoints/openai/serving_embedding.py index 3f7b75e893cad..2cbb252610e39 100644 --- a/vllm/entrypoints/openai/serving_embedding.py +++ b/vllm/entrypoints/openai/serving_embedding.py @@ -19,7 +19,7 @@ from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing from vllm.logger import init_logger from vllm.outputs import PoolingOutput, PoolingRequestOutput -from vllm.utils import merge_async_iterators +from vllm.utils import merge_async_iterators, random_uuid logger = init_logger(__name__) @@ -110,7 +110,7 @@ async def create_embedding( "dimensions is currently not supported") model_name = request.model - request_id = f"embd-{self._base_request_id(raw_request)}" + request_id = f"embd-{random_uuid()}" created_time = int(time.monotonic()) truncate_prompt_tokens = None diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index 63f27b955461e..8232c6116c1bd 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -6,7 +6,6 @@ from typing import (Any, Callable, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Tuple, TypedDict, Union) -from fastapi import Request from pydantic import Field from starlette.datastructures import Headers from typing_extensions import Annotated @@ -48,7 +47,7 @@ from vllm.tracing import (contains_trace_headers, extract_trace_headers, log_tracing_disabled_warning) from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer -from vllm.utils import AtomicCounter, is_list_of, make_async, random_uuid +from vllm.utils import AtomicCounter, is_list_of, make_async logger = init_logger(__name__) @@ -566,14 +565,6 @@ async def _get_trace_headers( return None - @staticmethod - def _base_request_id(raw_request: Request, - default: Optional[str] = None) -> Optional[str]: - """Pulls the request id to use from a header, if provided""" - default = default or random_uuid() - return raw_request.headers.get( - "X-Request-Id", default) if raw_request is not None else default - @staticmethod def _get_decoded_token(logprob: Logprob, token_id: int, diff --git a/vllm/entrypoints/openai/serving_score.py b/vllm/entrypoints/openai/serving_score.py index fed06fa452955..a1f14449ba9c3 100644 --- a/vllm/entrypoints/openai/serving_score.py +++ b/vllm/entrypoints/openai/serving_score.py @@ -15,7 +15,7 @@ from vllm.logger import init_logger from vllm.outputs import PoolingRequestOutput from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer -from vllm.utils import make_async, merge_async_iterators +from vllm.utils import make_async, merge_async_iterators, random_uuid logger = init_logger(__name__) @@ -102,7 +102,7 @@ async def create_score( return error_check_ret model_name = request.model - request_id = f"score-{self._base_request_id(raw_request)}" + request_id = f"score-{random_uuid()}" created_time = int(time.monotonic()) truncate_prompt_tokens = request.truncate_prompt_tokens diff --git a/vllm/entrypoints/openai/serving_tokenization.py b/vllm/entrypoints/openai/serving_tokenization.py index 2e849333680d4..9c3dc2c98b2dd 100644 --- a/vllm/entrypoints/openai/serving_tokenization.py +++ b/vllm/entrypoints/openai/serving_tokenization.py @@ -1,7 +1,5 @@ from typing import Final, List, Optional, Union -from fastapi import Request - from vllm.config import ModelConfig from vllm.engine.protocol import EngineClient from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption @@ -19,6 +17,7 @@ LoRAModulePath, OpenAIServing) from 
vllm.logger import init_logger +from vllm.utils import random_uuid logger = init_logger(__name__) @@ -49,13 +48,12 @@ def __init__( async def create_tokenize( self, request: TokenizeRequest, - raw_request: Request, ) -> Union[TokenizeResponse, ErrorResponse]: error_check_ret = await self._check_model(request) if error_check_ret is not None: return error_check_ret - request_id = f"tokn-{self._base_request_id(raw_request)}" + request_id = f"tokn-{random_uuid()}" try: ( @@ -114,13 +112,12 @@ async def create_tokenize( async def create_detokenize( self, request: DetokenizeRequest, - raw_request: Request, ) -> Union[DetokenizeResponse, ErrorResponse]: error_check_ret = await self._check_model(request) if error_check_ret is not None: return error_check_ret - request_id = f"tokn-{self._base_request_id(raw_request)}" + request_id = f"tokn-{random_uuid()}" ( lora_request, diff --git a/vllm/envs.py b/vllm/envs.py index ab12a7b48dc53..28797ac1e4af2 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -8,6 +8,7 @@ VLLM_RPC_BASE_PATH: str = tempfile.gettempdir() VLLM_USE_MODELSCOPE: bool = False VLLM_RINGBUFFER_WARNING_INTERVAL: int = 60 + VLLM_INSTANCE_ID: Optional[str] = None VLLM_NCCL_SO_PATH: Optional[str] = None LD_LIBRARY_PATH: Optional[str] = None VLLM_USE_TRITON_FLASH_ATTN: bool = False @@ -174,6 +175,11 @@ def get_default_config_root(): "VLLM_USE_MODELSCOPE": lambda: os.environ.get("VLLM_USE_MODELSCOPE", "False").lower() == "true", + # Instance id represents an instance of the VLLM. All processes in the same + # instance should have the same instance id. + "VLLM_INSTANCE_ID": + lambda: os.environ.get("VLLM_INSTANCE_ID", None), + # Interval in seconds to log a warning message when the ring buffer is full "VLLM_RINGBUFFER_WARNING_INTERVAL": lambda: int(os.environ.get("VLLM_RINGBUFFER_WARNING_INTERVAL", "60")), diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 2816b5c5c1f88..6b4cb5a9a1d61 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -10,7 +10,8 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sequence import ExecuteModelRequest -from vllm.utils import get_distributed_init_method, get_open_port, make_async +from vllm.utils import (get_distributed_init_method, get_open_port, + get_vllm_instance_id, make_async) from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) @@ -30,6 +31,9 @@ def _init_executor(self) -> None: # Environment variables for CPU executor # + # Ensure that VLLM_INSTANCE_ID is set, to be inherited by workers + os.environ["VLLM_INSTANCE_ID"] = get_vllm_instance_id() + # Disable torch async compiling which won't work with daemonic processes os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" diff --git a/vllm/executor/multiproc_gpu_executor.py b/vllm/executor/multiproc_gpu_executor.py index fc58163cade64..a6c05a71d2b6f 100644 --- a/vllm/executor/multiproc_gpu_executor.py +++ b/vllm/executor/multiproc_gpu_executor.py @@ -3,19 +3,25 @@ from functools import partial from typing import Any, List, Optional +import torch + from vllm.executor.distributed_gpu_executor import ( # yapf: disable DistributedGPUExecutor, DistributedGPUExecutorAsync) from vllm.executor.gpu_executor import create_worker -from vllm.executor.multiproc_worker_utils import ( - ProcessWorkerWrapper, ResultHandler, WorkerMonitor, - set_multiprocessing_worker_envs) +from vllm.executor.multiproc_worker_utils import (ProcessWorkerWrapper, + 
ResultHandler, WorkerMonitor) from vllm.logger import init_logger from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest +from vllm.triton_utils.importing import HAS_TRITON from vllm.utils import (_run_task_with_lock, cuda_device_count_stateless, - get_distributed_init_method, get_open_port, make_async, + cuda_is_initialized, get_distributed_init_method, + get_open_port, get_vllm_instance_id, make_async, update_environment_variables) +if HAS_TRITON: + from vllm.triton_utils import maybe_set_triton_cache_manager + logger = init_logger(__name__) @@ -31,8 +37,33 @@ def _init_executor(self) -> None: world_size = self.parallel_config.world_size tensor_parallel_size = self.parallel_config.tensor_parallel_size - # Set multiprocessing envs that are common to V0 and V1 - set_multiprocessing_worker_envs(self.parallel_config) + # Ensure that VLLM_INSTANCE_ID is set, to be inherited by workers + os.environ["VLLM_INSTANCE_ID"] = get_vllm_instance_id() + + # Disable torch async compiling which won't work with daemonic processes + os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" + + # Configure thread parallelism if OMP_NUM_THREADS isn't set + # + # Helps to avoid CPU contention. The default of spawning a thread per + # core combined with multiprocessing for each GPU can have a negative + # impact on performance. The contention is amplified when running in a + # container where CPU limits can cause throttling. + default_omp_num_threads = 1 + if "OMP_NUM_THREADS" not in os.environ and ( + current_parallelism := + torch.get_num_threads()) > default_omp_num_threads: + logger.warning( + "Reducing Torch parallelism from %d threads to %d to avoid " + "unnecessary CPU contention. Set OMP_NUM_THREADS in the " + "external environment to tune this value as needed.", + current_parallelism, default_omp_num_threads) + os.environ["OMP_NUM_THREADS"] = str(default_omp_num_threads) + torch.set_num_threads(default_omp_num_threads) + + # workaround for https://github.com/vllm-project/vllm/issues/6103 + if HAS_TRITON and world_size > 1: + maybe_set_triton_cache_manager() # Multiprocessing-based executor does not support multi-node setting. # Since it only works for single node, we can use the loopback address @@ -94,6 +125,13 @@ def _check_executor_parameters(self): "CUDA_VISIBLE_DEVICES": (",".join(map(str, range(world_size)))) }) + if (cuda_is_initialized() + and os.environ.get("VLLM_WORKER_MULTIPROC_METHOD") != "spawn"): + logger.warning("CUDA was previously initialized. We must use " + "the `spawn` multiprocessing start method. Setting " + "VLLM_WORKER_MULTIPROC_METHOD to 'spawn'.") + os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" + cuda_device_count = cuda_device_count_stateless() # Use confusing message for more common TP-only case. 
assert tensor_parallel_size <= cuda_device_count, ( diff --git a/vllm/executor/multiproc_worker_utils.py b/vllm/executor/multiproc_worker_utils.py index fe475db6d3f57..884267d23dfc8 100644 --- a/vllm/executor/multiproc_worker_utils.py +++ b/vllm/executor/multiproc_worker_utils.py @@ -11,15 +11,8 @@ from typing import (Any, Callable, Dict, Generic, List, Optional, TextIO, TypeVar, Union) -import torch - import vllm.envs as envs from vllm.logger import init_logger -from vllm.triton_utils.importing import HAS_TRITON -from vllm.utils import cuda_is_initialized - -if HAS_TRITON: - from vllm.triton_utils import maybe_set_triton_cache_manager logger = init_logger(__name__) @@ -277,38 +270,3 @@ def write_with_prefix(s: str): def get_mp_context(): mp_method = envs.VLLM_WORKER_MULTIPROC_METHOD return multiprocessing.get_context(mp_method) - - -def set_multiprocessing_worker_envs(parallel_config): - """ Set up environment variables that should be used when there are workers - in a multiprocessing environment. This should be called by the parent - process before worker processes are created""" - - if (cuda_is_initialized() - and os.environ.get("VLLM_WORKER_MULTIPROC_METHOD") != "spawn"): - logger.warning("CUDA was previously initialized. We must use " - "the `spawn` multiprocessing start method. Setting " - "VLLM_WORKER_MULTIPROC_METHOD to 'spawn'.") - os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" - - # Configure thread parallelism if OMP_NUM_THREADS isn't set - # - # Helps to avoid CPU contention. The default of spawning a thread per - # core combined with multiprocessing for each GPU can have a negative - # impact on performance. The contention is amplified when running in a - # container where CPU limits can cause throttling. - default_omp_num_threads = 1 - if "OMP_NUM_THREADS" not in os.environ and ( - current_parallelism := - torch.get_num_threads()) > default_omp_num_threads: - logger.warning( - "Reducing Torch parallelism from %d threads to %d to avoid " - "unnecessary CPU contention. Set OMP_NUM_THREADS in the " - "external environment to tune this value as needed.", - current_parallelism, default_omp_num_threads) - os.environ["OMP_NUM_THREADS"] = str(default_omp_num_threads) - torch.set_num_threads(default_omp_num_threads) - - # workaround for https://github.com/vllm-project/vllm/issues/6103 - if HAS_TRITON and parallel_config.world_size > 1: - maybe_set_triton_cache_manager() diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 4263fb27265f6..6542b18ae70b1 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -15,7 +15,8 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest from vllm.utils import (_run_task_with_lock, get_distributed_init_method, - get_ip, get_open_port, make_async) + get_ip, get_open_port, get_vllm_instance_id, + make_async) if ray is not None: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -188,14 +189,8 @@ def sort_by_driver_then_worker_ip(worker): self.workers = sorted(self.workers, key=sort_by_driver_then_worker_ip) # Get the set of GPU IDs used on each node. - worker_node_and_gpu_ids = [] - for worker in [self.driver_dummy_worker] + self.workers: - if worker is None: - # driver_dummy_worker can be None when using ray spmd worker. 
- continue - worker_node_and_gpu_ids.append( - ray.get(worker.get_node_and_gpu_ids.remote()) \ - ) # type: ignore + worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", + use_dummy_driver=True) node_workers = defaultdict(list) # node id -> list of worker ranks node_gpus = defaultdict(list) # node id -> list of gpu ids @@ -225,10 +220,14 @@ def sort_by_driver_then_worker_ip(worker): " environment variable, make sure it is unique for" " each node.") + VLLM_INSTANCE_ID = get_vllm_instance_id() + # Set environment variables for the driver and workers. all_args_to_update_environment_variables = [({ "CUDA_VISIBLE_DEVICES": ",".join(map(str, node_gpus[node_id])), + "VLLM_INSTANCE_ID": + VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), **({ @@ -335,6 +334,7 @@ def _run_workers( async_run_tensor_parallel_workers_only: bool = False, all_args: Optional[List[Tuple[Any, ...]]] = None, all_kwargs: Optional[List[Dict[str, Any]]] = None, + use_dummy_driver: bool = False, max_concurrent_workers: Optional[int] = None, **kwargs, ) -> Any: @@ -394,10 +394,18 @@ def _run_workers( driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] # Start the driver worker after all the ray workers. - driver_worker_output = [ - self.driver_worker.execute_method(method, *driver_args, - **driver_kwargs) - ] + if not use_dummy_driver: + driver_worker_output = [ + self.driver_worker.execute_method(method, *driver_args, + **driver_kwargs) + ] + else: + assert self.driver_dummy_worker is not None + driver_worker_output = [ + ray.get( + self.driver_dummy_worker.execute_method.remote( + method, *driver_args, **driver_kwargs)) + ] # Get the results of the ray workers. if self.workers: diff --git a/vllm/executor/ray_hpu_executor.py b/vllm/executor/ray_hpu_executor.py index 3b5e77bc519ad..3b3a3bc3da42c 100644 --- a/vllm/executor/ray_hpu_executor.py +++ b/vllm/executor/ray_hpu_executor.py @@ -16,7 +16,8 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest from vllm.utils import (_run_task_with_lock, get_distributed_init_method, - get_ip, get_open_port, is_fake_hpu, make_async) + get_ip, get_open_port, get_vllm_instance_id, + is_fake_hpu, make_async) from vllm.worker.worker_base import WorkerBase if ray is not None: @@ -200,14 +201,9 @@ def sort_by_driver_then_worker_ip(worker): # node will be placed first. self.workers = sorted(self.workers, key=sort_by_driver_then_worker_ip) - worker_node_and_gpu_ids = [] - for worker in [self.driver_dummy_worker] + self.workers: - if worker is None: - # driver_dummy_worker can be None when using ray spmd worker. - continue - worker_node_and_gpu_ids.append( - ray.get(worker.get_node_and_gpu_ids.remote()) \ - ) # type: ignore + # Get the set of GPU IDs used on each node. + worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", + use_dummy_driver=True) node_workers = defaultdict(list) # node id -> list of worker ranks node_gpus = defaultdict(list) # node id -> list of gpu ids @@ -237,8 +233,12 @@ def sort_by_driver_then_worker_ip(worker): "environment variable, make sure it is unique for" " each node.") + VLLM_INSTANCE_ID = get_vllm_instance_id() + # Set environment variables for the driver and workers. 
all_args_to_update_environment_variables = [({ + "VLLM_INSTANCE_ID": + VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), }, ) for (node_id, _) in worker_node_and_gpu_ids] @@ -338,6 +338,7 @@ def _run_workers( async_run_tensor_parallel_workers_only: bool = False, all_args: Optional[List[Tuple[Any, ...]]] = None, all_kwargs: Optional[List[Dict[str, Any]]] = None, + use_dummy_driver: bool = False, max_concurrent_workers: Optional[int] = None, **kwargs, ) -> Any: @@ -397,10 +398,18 @@ def _run_workers( driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] # Start the driver worker after all the ray workers. - driver_worker_output = [ - self.driver_worker.execute_method(method, *driver_args, - **driver_kwargs) - ] + if not use_dummy_driver: + driver_worker_output = [ + self.driver_worker.execute_method(method, *driver_args, + **driver_kwargs) + ] + else: + assert self.driver_dummy_worker is not None + driver_worker_output = [ + ray.get( + self.driver_dummy_worker.execute_method.remote( + method, *driver_args, **driver_kwargs)) + ] # Get the results of the ray workers. if self.workers: diff --git a/vllm/executor/ray_tpu_executor.py b/vllm/executor/ray_tpu_executor.py index 5118c13934f0d..c227b5e283c68 100644 --- a/vllm/executor/ray_tpu_executor.py +++ b/vllm/executor/ray_tpu_executor.py @@ -13,7 +13,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, - make_async) + get_vllm_instance_id, make_async) if ray is not None: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -137,21 +137,19 @@ def sort_by_driver_then_worker_ip(worker): self.workers = sorted(self.workers, key=sort_by_driver_then_worker_ip) # Get the set of TPU IDs used on each node. - worker_node_and_gpu_ids = [] - for worker in [self.driver_dummy_worker] + self.workers: - if worker is None: - # driver_dummy_worker can be None when using ray spmd worker. - continue - worker_node_and_gpu_ids.append( - ray.get(worker.get_node_and_gpu_ids.remote()) \ - ) # type: ignore + worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", + use_dummy_driver=True) node_workers = defaultdict(list) for i, (node_id, _) in enumerate(worker_node_and_gpu_ids): node_workers[node_id].append(i) + VLLM_INSTANCE_ID = get_vllm_instance_id() + # Set environment variables for the driver and workers. all_args_to_update_environment_variables = [({ + "VLLM_INSTANCE_ID": + VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), }, ) for _ in worker_node_and_gpu_ids] @@ -205,6 +203,7 @@ def _run_workers( async_run_remote_workers_only: bool = False, all_args: Optional[List[Tuple[Any, ...]]] = None, all_kwargs: Optional[List[Dict[str, Any]]] = None, + use_dummy_driver: bool = False, max_concurrent_workers: Optional[int] = None, use_ray_compiled_dag: bool = False, **kwargs, @@ -246,8 +245,14 @@ def _run_workers( driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] # Start the driver worker after all the ray workers. 
- driver_worker_output = self.driver_worker.execute_method( - method, *driver_args, **driver_kwargs) + if not use_dummy_driver: + driver_worker_output = self.driver_worker.execute_method( + method, *driver_args, **driver_kwargs) + else: + assert self.driver_dummy_worker is not None + driver_worker_output = ray.get( + self.driver_dummy_worker.execute_method.remote( + method, *driver_args, **driver_kwargs)) # Get the results of the ray workers. if self.workers: ray_worker_outputs = ray.get(ray_worker_outputs) diff --git a/vllm/executor/ray_xpu_executor.py b/vllm/executor/ray_xpu_executor.py index d2086f5fef26c..2b1cdc09b0a9f 100644 --- a/vllm/executor/ray_xpu_executor.py +++ b/vllm/executor/ray_xpu_executor.py @@ -1,13 +1,11 @@ import asyncio from typing import List, Optional -import ray - import vllm.envs as envs from vllm.executor.ray_gpu_executor import RayGPUExecutor, RayGPUExecutorAsync from vllm.executor.xpu_executor import XPUExecutor from vllm.logger import init_logger -from vllm.utils import make_async +from vllm.utils import get_vllm_instance_id, make_async logger = init_logger(__name__) @@ -16,16 +14,15 @@ class RayXPUExecutor(RayGPUExecutor, XPUExecutor): def _get_env_vars_to_be_updated(self): # Get the set of GPU IDs used on each node. - worker_node_and_gpu_ids = [] - for worker in [self.driver_dummy_worker] + self.workers: - if worker is None: - # driver_dummy_worker can be None when using ray spmd worker. - continue - worker_node_and_gpu_ids.append( - ray.get(worker.get_node_and_gpu_ids.remote())) # type: ignore + worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", + use_dummy_driver=True) + + VLLM_INSTANCE_ID = get_vllm_instance_id() # Set environment variables for the driver and workers. all_args_to_update_environment_variables = [({ + "VLLM_INSTANCE_ID": + VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), }, ) for (_, _) in worker_node_and_gpu_ids] diff --git a/vllm/inputs/registry.py b/vllm/inputs/registry.py index 0dfed3b7e61bf..85ab4355cc2e4 100644 --- a/vllm/inputs/registry.py +++ b/vllm/inputs/registry.py @@ -69,12 +69,12 @@ class InputProcessingContext(InputContext): tokenizer: AnyTokenizer """The tokenizer used to tokenize the inputs.""" - def get_hf_processor(self, **kwargs) -> ProcessorMixin: + def get_hf_processor(self) -> ProcessorMixin: return cached_get_processor( self.model_config.tokenizer, tokenizer=self.tokenizer, # Override the tokenizer with ours trust_remote_code=self.model_config.trust_remote_code, - **kwargs) + ) N = TypeVar("N", bound=Type[nn.Module]) @@ -232,35 +232,19 @@ def dummy_data_for_profiling( """ # Avoid circular import from vllm.model_executor.model_loader import get_model_architecture - from vllm.multimodal import MultiModalKwargs - from vllm.multimodal.utils import cached_get_tokenizer - - if mm_registry.has_processor(model_config): - tokenizer = cached_get_tokenizer( - model_config.tokenizer, - trust_remote_code=model_config.trust_remote_code, - ) - processor = mm_registry.create_processor(model_config, tokenizer) - - mm_counts = mm_registry.get_mm_limits_per_prompt(model_config) - mm_max_tokens = mm_registry.get_max_tokens_by_modality( - model_config) - - dummy_data = processor.get_dummy_data(seq_len, mm_counts, - mm_max_tokens) + + model_cls, _ = get_model_architecture(model_config) + if is_encoder_data: + dummy_factory = self._get_dummy_encoder_data_factory(model_cls) else: - model_cls, _ = get_model_architecture(model_config) - if is_encoder_data: - dummy_factory = 
self._get_dummy_encoder_data_factory(model_cls) - else: - dummy_factory = self._get_dummy_data_factory(model_cls) - mm_counts = mm_registry.get_mm_limits_per_prompt(model_config) - mm_processor_kwargs = get_allowed_kwarg_only_overrides( - dummy_factory, overrides=model_config.mm_processor_kwargs) + dummy_factory = self._get_dummy_data_factory(model_cls) + mm_counts = mm_registry.get_mm_limits_per_prompt(model_config) + mm_processor_kwargs = get_allowed_kwarg_only_overrides( + dummy_factory, overrides=model_config.mm_processor_kwargs) - dummy_data = dummy_factory(InputContext(model_config), seq_len, - _MultiModalCounts(mm_counts), - **mm_processor_kwargs) + dummy_data = dummy_factory(InputContext(model_config), seq_len, + _MultiModalCounts(mm_counts), + **mm_processor_kwargs) # Having more tokens is over-conservative but otherwise fine num_tokens = dummy_data.seq_data.prompt_token_ids @@ -273,9 +257,7 @@ def dummy_data_for_profiling( raise AssertionError( f"Expected at least {seq_len} dummy tokens for profiling, " f"but found {len(num_tokens)} tokens instead.") - - if (dummy_data.multi_modal_data is not None and - not isinstance(dummy_data.multi_modal_data, MultiModalKwargs)): + if dummy_data.multi_modal_data is not None: for k, v in dummy_data.multi_modal_data.items(): num_items = len(v) if isinstance(v, list) else 1 num_expected = mm_counts[k] diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 12e09af1327a2..bec904c30a660 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -17,6 +17,7 @@ tensor_model_parallel_all_reduce, tensor_model_parallel_gather) from vllm.distributed.utils import divide +from vllm.lora.punica import PunicaWrapper # yapf: disable from vllm.model_executor.layers.linear import (ColumnParallelLinear, LinearBase, @@ -33,7 +34,7 @@ from vllm.platforms import current_platform if TYPE_CHECKING: - from vllm.lora.punica_wrapper import PunicaWrapperBase + pass def _get_lora_device(base_layer: nn.Module) -> torch.device: @@ -115,9 +116,9 @@ def set_lora( def set_mapping( self, - punica_wrapper, + punica_wrapper: PunicaWrapper, ): - self.punica_wrapper: PunicaWrapperBase = punica_wrapper + self.punica_wrapper: PunicaWrapper = punica_wrapper @classmethod def can_replace_layer( @@ -543,20 +544,10 @@ class MergedColumnParallelLinearWithLoRA(ColumnParallelLinearWithLoRA): Both slices must have the same size. 
""" - def __init__( - self, base_layer: Union[MergedColumnParallelLinear, - QKVParallelLinear]) -> None: + def __init__(self, base_layer: MergedColumnParallelLinear) -> None: super().__init__(base_layer) # There are two LoRA layers - self.tp_size = get_tensor_model_parallel_world_size() - self.tp_rank = get_tensor_model_parallel_rank() - # the output_sizes in MergedColumnParallelLinear is not sharded by tp - # we need to divide it by the tp_size to get correct slices size - output_sizes = self.base_layer.output_sizes - self.output_slices = tuple( - divide(output_size, self.tp_size) for output_size in output_sizes) - self.n_slices = len(self.output_slices) - self.output_ids = (self.tp_rank, ) * self.n_slices + self.n_slices = len(self.base_layer.output_sizes) def create_lora_weights( self, @@ -570,6 +561,15 @@ def create_lora_weights( """ self.lora_config = lora_config + if not (len(self.base_layer.output_sizes) == self.n_slices == 2 + and self.base_layer.output_sizes[0] + == self.base_layer.output_sizes[1]): + raise ValueError( + "LoRAColumnParallelLinear2Slice requires 2 slices with " + "the same size.") + self.tp_size = get_tensor_model_parallel_world_size() + self.tp_rank = get_tensor_model_parallel_rank() + lora_a_output_size_per_partition = ( lora_config.max_lora_rank if not lora_config.fully_sharded_loras else divide(lora_config.max_lora_rank, self.tp_size)) @@ -587,20 +587,22 @@ def create_lora_weights( torch.zeros( max_loras, 1, - output_size, + self.output_size // 2, lora_config.max_lora_rank, dtype=lora_config.lora_dtype, device=self.device, - ) for output_size in self.output_slices) + ) for _ in range(self.n_slices)) if lora_config.bias_enabled: self.lora_bias_stacked = tuple( torch.zeros( max_loras, 1, - output_size, + self.output_size // 2, dtype=lora_config.lora_dtype, device=self.device, - ) for output_size in self.output_slices) + ) for _ in range(self.n_slices)) + self.output_dim = self.lora_b_stacked[0].shape[2] + self.output_slices = (self.output_dim, self.output_dim) def slice_lora_a( self, lora_a: List[Union[torch.Tensor, None]] @@ -610,21 +612,27 @@ def slice_lora_a( def slice_lora_b( self, lora_b: List[Union[torch.Tensor, None]] ) -> List[Union[torch.Tensor, None]]: - for i, (shard_id, shard_size) in enumerate( - zip(self.output_ids, self.output_slices)): - if (lora_b_i := lora_b[i]) is not None: - lora_b[i] = lora_b_i[:, shard_size * shard_id:shard_size * - (shard_id + 1)] + #NOTE: lora_b contains 2 subloras, and each sublora could be None. + shard_size = self.output_dim + start_idx = self.tp_rank * shard_size + end_idx = (self.tp_rank + 1) * shard_size + lora_b = [ + lora_b[0][:, start_idx:end_idx] if lora_b[0] is not None else None, + lora_b[1][:, start_idx:end_idx] if lora_b[1] is not None else None, + ] return lora_b def slice_bias( self, bias: List[Union[torch.Tensor, None]]) -> List[Union[torch.Tensor, None]]: - for i, (shard_id, shard_size) in enumerate( - zip(self.output_ids, self.output_slices)): - if (bias_i := bias[i]) is not None: - bias[i] = bias_i[shard_size * shard_id:shard_size * - (shard_id + 1)] + # NOTE : each bias could be None. 
+ shard_size = self.output_dim + start_idx = self.tp_rank * shard_size + end_idx = (self.tp_rank + 1) * shard_size + bias = [ + bias[0][start_idx:end_idx] if bias[0] is not None else None, + bias[1][start_idx:end_idx] if bias[1] is not None else None + ] return bias def set_lora( @@ -643,25 +651,30 @@ def set_lora( if lora_bias is not None: lora_bias = self.slice_bias(lora_bias) - for i in range(self.n_slices): - if (lora_a_i := lora_a[i]) is not None: - self.lora_a_stacked[i][ - index, 0, :lora_a_i.shape[1], :lora_a_i.shape[0]].copy_( - lora_a_i.T, non_blocking=True) - if (lora_b_i := lora_b[i]) is not None: - self.lora_b_stacked[i][ - index, 0, :lora_b_i.shape[1], :lora_b_i.shape[0]].copy_( - lora_b_i.T, non_blocking=True) - - if lora_bias is not None: + if lora_a[0] is not None: + self.lora_a_stacked[0][ + index, 0, :lora_a[0].shape[1], :lora_a[0].shape[0]].copy_( + lora_a[0].T, non_blocking=True) + self.lora_b_stacked[0][ + index, 0, :lora_b[0].shape[1], :lora_b[0].shape[0]].copy_( + lora_b[0].T, non_blocking=True) + if lora_bias is not None and lora_bias[0] is not None: + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + self.lora_bias_stacked[0][index, 0, :lora_bias[0].shape[0]].copy_( + lora_bias[0].T, non_blocking=True) + if lora_a[1] is not None: + self.lora_a_stacked[1][ + index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_( + lora_a[1].T, non_blocking=True) + self.lora_b_stacked[1][ + index, 0, :lora_b[1].shape[1], :lora_b[1].shape[0]].copy_( + lora_b[1].T, non_blocking=True) + if lora_bias is not None and lora_bias[1] is not None: self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], self.lora_bias_stacked) - for i in range(self.n_slices): - if (lora_bias_i := lora_bias[i]) is not None: - self.lora_bias_stacked[i][index, - 0, :lora_bias_i.shape[0]].copy_( - lora_bias_i.T, - non_blocking=True) + self.lora_bias_stacked[1][index, 0, :lora_bias[1].shape[0]].copy_( + lora_bias[1].T, non_blocking=True) @classmethod @_not_fully_sharded_can_replace @@ -744,8 +757,8 @@ def can_replace_layer(cls, source_layer: nn.Module, packed_modules_list) == 1 -class MergedQKVParallelLinearWithLora(MergedColumnParallelLinearWithLoRA): - """MergedColumnParallelLinear layer that is composed of 3 sublayers (slices) +class MergedQKVParallelLinearWithLora(ColumnParallelLinearWithLoRA): + """ColumnParallelLinear layer that is composed of 3 sublayers (slices) packed together in qkv proj fashion (q_proj + k_proj + v_proj -> qkv_proj). @@ -762,6 +775,22 @@ def __init__(self, base_layer: QKVParallelLinear) -> None: self.tp_size = get_tensor_model_parallel_world_size() self.tp_rank = get_tensor_model_parallel_rank() + def create_lora_weights( + self, + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None, + ) -> None: + """ + The main reason for overloading this function is to handle inconsistent + weight dimensions in qkv lora. 
+ """ + self.lora_config = lora_config + + if not (len(self.base_layer.output_sizes) == self.n_slices == 3): + raise ValueError( + "LoRAColumnParallelLinear3Slice requires 3 slices.") + self.q_proj_shard_size = (self.base_layer.num_heads * self.base_layer.head_size) self.kv_proj_shard_size = (self.base_layer.num_kv_heads * @@ -769,28 +798,203 @@ def __init__(self, base_layer: QKVParallelLinear) -> None: self.q_shard_id = self.tp_rank self.kv_shard_id = self.tp_rank // self.base_layer.num_kv_head_replicas + lora_a_output_size_per_partition = ( + lora_config.max_lora_rank if not lora_config.fully_sharded_loras + else divide(lora_config.max_lora_rank, self.tp_size)) + # q, k, v + self.lora_a_stacked = ( + torch.zeros( + max_loras, + 1, + lora_a_output_size_per_partition, + self.input_size, + dtype=lora_config.lora_dtype, + device=self.device, + ), + torch.zeros( + max_loras, + 1, + lora_a_output_size_per_partition, + self.input_size, + dtype=lora_config.lora_dtype, + device=self.device, + ), + torch.zeros( + max_loras, + 1, + lora_a_output_size_per_partition, + self.input_size, + dtype=lora_config.lora_dtype, + device=self.device, + ), + ) + self.lora_b_stacked = ( + torch.zeros( + max_loras, + 1, + self.q_proj_shard_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ), + torch.zeros( + max_loras, + 1, + self.kv_proj_shard_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ), + torch.zeros( + max_loras, + 1, + self.kv_proj_shard_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ), + ) + if lora_config.bias_enabled: + self.lora_bias_stacked = ( + torch.zeros( + max_loras, + 1, + self.q_proj_shard_size, + dtype=lora_config.lora_dtype, + device=self.device, + ), + torch.zeros( + max_loras, + 1, + self.kv_proj_shard_size, + dtype=lora_config.lora_dtype, + device=self.device, + ), + torch.zeros( + max_loras, + 1, + self.kv_proj_shard_size, + dtype=lora_config.lora_dtype, + device=self.device, + ), + ) self.output_slices = ( self.q_proj_shard_size, self.kv_proj_shard_size, self.kv_proj_shard_size, ) - self.output_ids = ( - self.q_shard_id, - self.kv_shard_id, - self.kv_shard_id, - ) + self.packed_indices: Optional[torch.Tensor] = None + self.standard_indices: Optional[torch.Tensor] = None + # lazily initialized. 
+ self.indices: torch.Tensor + self.indices_len: List[int] - def create_lora_weights( + def slice_lora_a( + self, lora_a: List[Union[torch.Tensor, None]] + ) -> List[Union[torch.Tensor, None]]: + return lora_a + + def slice_lora_b( + self, lora_b: List[Union[torch.Tensor, None]] + ) -> List[Union[torch.Tensor, None]]: + lora_b_q, lora_b_k, lora_b_v = None, None, None + if lora_b[0] is not None: + lora_b_q = lora_b[0][:, self.q_proj_shard_size * + self.q_shard_id:self.q_proj_shard_size * + (self.q_shard_id + 1), ] + if lora_b[1] is not None: + lora_b_k = lora_b[1][:, self.kv_proj_shard_size * + self.kv_shard_id:self.kv_proj_shard_size * + (self.kv_shard_id + 1), ] + if lora_b[2] is not None: + lora_b_v = lora_b[2][:, self.kv_proj_shard_size * + self.kv_shard_id:self.kv_proj_shard_size * + (self.kv_shard_id + 1), ] + lora_b = [lora_b_q, lora_b_k, lora_b_v] + return lora_b + + def slice_bias( + self, bias: List[Union[torch.Tensor, + None]]) -> List[Union[torch.Tensor, None]]: + bias_q, bias_k, bias_v = bias + if bias_q is not None: + bias_q = bias_q[self.q_proj_shard_size * + self.q_shard_id:self.q_proj_shard_size * + (self.q_shard_id + 1)] + if bias_k is not None: + bias_k = bias_k[self.kv_proj_shard_size * + self.kv_shard_id:self.kv_proj_shard_size * + (self.kv_shard_id + 1)] + if bias_v is not None: + bias_v = bias_v[self.kv_proj_shard_size * + self.kv_shard_id:self.kv_proj_shard_size * + (self.kv_shard_id + 1)] + bias = [bias_q, bias_k, bias_v] + return bias + + def set_lora( self, - max_loras: int, - lora_config: LoRAConfig, - model_config: Optional[PretrainedConfig] = None, - ) -> None: - """ - The main reason for overloading this function is to handle inconsistent - weight dimensions in qkv lora. - """ - super().create_lora_weights(max_loras, lora_config, model_config) + index: int, + lora_a: torch.Tensor, + lora_b: torch.Tensor, + embeddings_tensor: Optional[torch.Tensor], + lora_bias: Optional[torch.Tensor] = None, + ): + self.reset_lora(index) + + if self.tp_size > 1: + lora_a = self.slice_lora_a(lora_a) + lora_b = self.slice_lora_b(lora_b) + if lora_bias is not None: + lora_bias = self.slice_bias(lora_bias) + + if lora_b[0] is not None: + lora_b_q = lora_b[0] + self.lora_b_stacked[0][ + index, 0, :lora_b_q.shape[1], :lora_b_q.shape[0]].copy_( + lora_b_q.T, non_blocking=True) + if lora_b[1] is not None: + lora_b_k = lora_b[1] + self.lora_b_stacked[1][ + index, 0, :lora_b_k.shape[1], :lora_b_k.shape[0]].copy_( + lora_b_k.T, non_blocking=True) + if lora_b[2] is not None: + lora_b_v = lora_b[2] + self.lora_b_stacked[2][ + index, 0, :lora_b_v.shape[1], :lora_b_v.shape[0]].copy_( + lora_b_v.T, non_blocking=True) + + if lora_a[0] is not None: + self.lora_a_stacked[0][ + index, 0, :lora_a[0].shape[1], :lora_a[0].shape[0]].copy_( + lora_a[0].T, non_blocking=True) + if lora_a[1] is not None: + self.lora_a_stacked[1][ + index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_( + lora_a[1].T, non_blocking=True) + if lora_a[2] is not None: + self.lora_a_stacked[2][ + index, 0, :lora_a[2].shape[1], :lora_a[2].shape[0]].copy_( + lora_a[2].T, non_blocking=True) + + if lora_bias is not None: + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + if lora_bias[0] is not None: + self.lora_bias_stacked[0][index, + 0, :lora_bias[0].shape[0]].copy_( + lora_bias[0].T, + non_blocking=True) + if lora_bias[1] is not None: + self.lora_bias_stacked[1][index, + 0, :lora_bias[1].shape[0]].copy_( + lora_bias[1].T, + non_blocking=True) + if lora_bias[2] is not None: + 
self.lora_bias_stacked[2][index, + 0, :lora_bias[2].shape[0]].copy_( + lora_bias[2].T, + non_blocking=True) @classmethod @_not_fully_sharded_can_replace @@ -1070,11 +1274,8 @@ def _get_logits( ).index_select(0, indices_padded).nan_to_num_(nan=float("-inf"), posinf=float("inf"), neginf=float("-inf"))) - - # HPU needs special handling to prune out dummy samples if current_platform.is_hpu(): lora_logits = lora_logits[:logits.shape[0], :] - logits[:, self.base_layer.org_vocab_size:self.base_layer.org_vocab_size + lora_logits.shape[1]] = lora_logits diff --git a/vllm/lora/models.py b/vllm/lora/models.py index e30e723680c51..a3ae7fb33f155 100644 --- a/vllm/lora/models.py +++ b/vllm/lora/models.py @@ -22,7 +22,7 @@ LinearScalingRotaryEmbeddingWithLora, LoRAMapping) from vllm.lora.lora import LoRALayerWeights, PackedLoRALayerWeights -from vllm.lora.punica_wrapper import get_punica_wrapper +from vllm.lora.punica import PunicaWrapper from vllm.lora.utils import (from_layer, from_layer_logits_processor, is_regex_target_modules, parse_fine_tuned_lora_name, replace_submodule) @@ -32,6 +32,9 @@ from vllm.platforms import current_platform from vllm.utils import is_pin_memory_available +if current_platform.is_hpu(): + from vllm_hpu_extension.punica_hpu import GaudiPunicaWrapper + logger = init_logger(__name__) _GLOBAL_LORA_ID = 0 @@ -443,12 +446,15 @@ def __init__( self.lora_index_to_id: List[Optional[int]] = [None] * self.lora_slots self.vocab_size = vocab_size self.long_lora_context: Optional[LongContextLoRAContext] = None - punica_max_num_batched_tokens = max_num_batched_tokens if current_platform.is_hpu(): - punica_max_num_batched_tokens = 3 * max_num_batched_tokens - self.punica_wrapper = get_punica_wrapper(punica_max_num_batched_tokens, - max_batches=self.max_num_seqs, - device=self.device) + self.punica_wrapper = GaudiPunicaWrapper( + 3 * max_num_batched_tokens, + max_batches=self.max_num_seqs, + device="hpu") + else: + self.punica_wrapper = PunicaWrapper(max_num_batched_tokens, + max_batches=self.max_num_seqs, + device=self.device) # Scaling factor -> offset to the sin_cos_cache to it. # Used for long context lora. self.scaling_factor_to_offset: Dict[float, int] = {} diff --git a/vllm/lora/punica.py b/vllm/lora/punica.py new file mode 100644 index 0000000000000..fd337df0277a3 --- /dev/null +++ b/vllm/lora/punica.py @@ -0,0 +1,740 @@ +""" +Based on: +Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023). +Punica: Multi-Tenant LoRA Serving. +https://arxiv.org/abs/2310.18547 +""" + +from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union + +import torch + +from vllm.triton_utils import HAS_TRITON +from vllm.utils import get_device + +if HAS_TRITON: + from vllm.lora.ops.bgmv_expand import bgmv_expand + from vllm.lora.ops.bgmv_expand_slice import bgmv_expand_slice + from vllm.lora.ops.bgmv_shrink import bgmv_shrink + from vllm.lora.ops.sgmv_expand import sgmv_expand + from vllm.lora.ops.sgmv_expand_slice import sgmv_expand_slice + from vllm.lora.ops.sgmv_shrink import sgmv_shrink + +if TYPE_CHECKING: + # avoid circuit import + from vllm.lora.layers import LoRAMapping + from vllm.lora.models import LongContextLoRAContext + + +def compute_meta( + token_lora_tensor: torch.Tensor +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int, bool]: + """ + Get the information required for the sgmv kernel. With the features: + 1. 
If consecutive requests in the batch use the same LoRA, this function + will combine them into a single request, improving sgmv kernel inference + performance. + 2. At the beginning of each prefill stage inference, recalculations are + needed based on the input, but only once. + """ + + lora_indices_tensor, seq_length_tensor = torch.unique_consecutive( + token_lora_tensor, return_counts=True) + cum_result = torch.cumsum(seq_length_tensor, dim=0) + b_seq_start_tensor = torch.zeros_like(seq_length_tensor) + b_seq_start_tensor[1:].copy_(cum_result[:-1]) + max_length = seq_length_tensor.max().item() + token_nums = seq_length_tensor.sum().item() + batch_size = lora_indices_tensor.size(0) + no_lora = False + # -1 means no lora should be applied. Use `no_lora` to determine whether + # the current step requires LoRA. If LoRA is not needed, the prefill stage + # does not need to launch the triton kernel, which can improve performance + if batch_size == 1 and lora_indices_tensor == -1: + no_lora = True + return (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, + batch_size, max_length, token_nums, no_lora) + + +# TODO see if this can be vectorized +def convert_mapping( + mapping: "LoRAMapping", + lora_index_to_id: List[Optional[int]], + max_loras: int, + vocab_size: int, + extra_vocab_size: int, + device: torch.device, + long_lora_context: Optional["LongContextLoRAContext"] = None, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, + Optional[torch.Tensor], List[int]]: + """Converts LoRAMapping to index tensors. + + Args: + mapping: LoRAMapping mapping rows in a batch to LoRA ids. + lora_index_to_id: List mapping LoRA ids to LoRA indices. + max_loras: Maximum number of LoRAs. + vocab_size: Model vocab size. + extra_vocab_size: Extra vocab size each LoRA can have. + long_lora_context: Passed if there are long context lora in a batch. + + Returns: + A tuple of tensors: + base_indices: Tensor of shape [batch_size] mapping batch rows to + LoRA indices. + sampler_indices: Tensor of shape [batch_size] mapping requests to + LoRA indices for sampler. For generation, this will be the + same as base_indicies. For prefill, this will map requests + to LoRA indices. + sampler_indices_padded: Tensor of shape [batch_size] mapping + requests to LoRA indices for sampler with padding. + Same as sampler_indicies, but -1 is replaced with + max_loras. + embeddings_indices: Tensor of shape [2, batch_size] mapping + requests to embedding indices. First row is for embeddings + added by the LoRAs, second row is for the LoRA.lora_a + embeddings. + long_lora_indices: Tensor of shape [batch_size] mapping + requests to RoPE offsets and rot dims for long LoRAs. + None if long context lora doesn't exist. + indices_len: List of lengths of the above tensors. It contains + (base_indices, sampler_indices, sampler_indices_padded, + embeddings_indices, long_lora_indices). 
+ """ + index_mapping_indices: List[int] = list(mapping.index_mapping).copy() + embedding_indices = index_mapping_indices.copy() + lora_indices = index_mapping_indices.copy() + long_lora_offsets: Optional[torch.Tensor] = None + + from vllm.platforms import current_platform + if long_lora_context: + if current_platform.is_hpu(): + long_lora_offsets_list: List[int] = [] + else: + long_lora_offsets = torch.zeros(len(index_mapping_indices), + device=get_device(), + dtype=torch.long) + prompt_mapping: List[int] = [ + lora_index_to_id.index(x) if x > 0 else -1 + for x in mapping.prompt_mapping + ] + lora_idx = None + for i in range(len(index_mapping_indices)): + # TODO index can be slow. optimize + lora_idx = (lora_index_to_id.index(index_mapping_indices[i]) + if index_mapping_indices[i] > 0 else -1) + embedding_indices[i] = lora_idx if index_mapping_indices[i] > 0 else 0 + lora_indices[i] = lora_idx + if long_lora_context: + lora_offset: int = long_lora_context.offsets_by_lora_id.get( + index_mapping_indices[i], 0) + if current_platform.is_hpu(): + long_lora_offsets_list.append(lora_offset) + else: + assert long_lora_offsets is not None + long_lora_offsets[i] = lora_offset + + if long_lora_context and current_platform.is_hpu(): + long_lora_offsets = torch.tensor(long_lora_offsets_list, + device=get_device(), + dtype=torch.long) + + indices_list: List[Union[List[int], torch.Tensor]] = [ + index_mapping_indices, + lora_indices, + embedding_indices, + ] + if long_lora_context: + assert long_lora_offsets is not None + indices_list.append(long_lora_offsets) + indices = torch.tensor(indices_list, dtype=torch.long, device=get_device()) + prompt_mapping_tensor = torch.tensor(prompt_mapping, + device=get_device(), + dtype=torch.long) + embeddings_indices = torch.stack([ + indices[2] * extra_vocab_size, + indices[2] * (vocab_size + extra_vocab_size), + ]) + embeddings_indices[embeddings_indices == -1] = max_loras - 1 + base_indices = indices[1] + sampler_indices = prompt_mapping_tensor + sampler_indices_padded = sampler_indices.clone() + sampler_indices_padded[sampler_indices_padded == -1] = max_loras - 1 + sampler_indices_padded = torch.arange( + 0, len(sampler_indices_padded), device=get_device(), + dtype=torch.long) + (sampler_indices_padded * + len(sampler_indices_padded)) + long_lora_indices = None + long_lora_indices_len: Optional[int] = None + if long_lora_context: + long_lora_indices = indices[3] + long_lora_indices_len = long_lora_indices.shape[-1] + # Contain length of indices tensors. Used to index into each tensor. + indices_len = [ + base_indices.shape[-1], + sampler_indices.shape[-1], + sampler_indices_padded.shape[-1], + embeddings_indices.shape[-1], + ] + if long_lora_indices_len is not None: + indices_len.append(long_lora_indices_len) + else: + # If long_lora doesn't exist,append None + indices_len.append(None) + + return ( + base_indices, + sampler_indices, + sampler_indices_padded, + embeddings_indices, + long_lora_indices, + indices_len, + ) + + +class PunicaWrapper: + """ + PunicaWrapper is designed to manage and provide metadata for the punica + kernel. The main function is to maintain the state information for + Multi-LoRA, and to provide the interface for the punica kernel. 
+ """ + + def __init__(self, max_num_batched_tokens: int, max_batches: int, + device: Union[torch.device, str]): + self._token_lora_indices = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + self._sampler_indices = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + self._sampler_indices_padded = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + self._embeddings_indices = torch.empty(2, + max_num_batched_tokens, + dtype=torch.long, + device=device) + self._long_lora_indices = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + + # 5 is the number of indicies tensors. + # base_indices, sampler_indices, sampler_indices_padded, + # embeddings_indices,long_lora_indices + self.indices_len: List[Optional[int]] = [None] * 5 + # these attributes are the information required for sgmv kernel + self._seq_start_locs = torch.empty(max_batches, + dtype=torch.long, + device=device) + self._seq_lengths = torch.empty(max_batches, + dtype=torch.long, + device=device) + self._lora_indices_per_batch = torch.empty(max_batches, + dtype=torch.long, + device=device) + self.device: torch.device = device + self.max_length: int = 0 + self.token_nums: int = 0 + self.batch_size: int = -1 + self.is_prefill = False + self.no_lora = False + + def update_metadata( + self, + mapping: "LoRAMapping", + lora_index_to_id: List[Optional[int]], + max_loras: int, + vocab_size: int, + extra_vocab_size: int, + long_lora_context: Optional["LongContextLoRAContext"] = None, + ): + + self._update_base_metadata(mapping, lora_index_to_id, max_loras, + vocab_size, extra_vocab_size, + long_lora_context) + if mapping.is_prefill: + # Update metadata required for prefill-related operators. + self._update_prefill_metada(self.token_lora_indices) + self.is_prefill = True + else: + self.is_prefill = False + + def _update_base_metadata( + self, + mapping: "LoRAMapping", + lora_index_to_id: List[Optional[int]], + max_loras: int, + vocab_size: int, + extra_vocab_size: int, + long_lora_context: Optional["LongContextLoRAContext"] = None, + ): + ( + base_indices, + sampler_indices, + sampler_indices_padded, + embeddings_indices, + long_lora_offsets_tensor, + indices_len, + ) = convert_mapping( + mapping, + lora_index_to_id, + max_loras, + vocab_size, + extra_vocab_size, + self.device, + long_lora_context, + ) + self._token_lora_indices[:base_indices.shape[0]].copy_(base_indices) + self._sampler_indices[:sampler_indices.shape[0]].copy_(sampler_indices) + self._sampler_indices_padded[:sampler_indices_padded.shape[0]].copy_( + sampler_indices_padded) + self._embeddings_indices[:embeddings_indices. 
+ shape[0], :embeddings_indices.shape[1]].copy_( + embeddings_indices) + if long_lora_offsets_tensor is not None: + self._long_lora_indices[:long_lora_offsets_tensor.shape[0]].copy_( + long_lora_offsets_tensor) + else: + self._long_lora_indices.zero_() + self.indices_len[:] = indices_len + + def _update_prefill_metada(self, token_lora_tensor: torch.Tensor) -> None: + + (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, + batch_size, max_length, token_nums, + no_lora) = compute_meta(token_lora_tensor) + + self._seq_start_locs[:b_seq_start_tensor.shape[0]].copy_( + b_seq_start_tensor) + self._seq_lengths[:seq_length_tensor.shape[0]].copy_(seq_length_tensor) + self._lora_indices_per_batch[:lora_indices_tensor.shape[0]].copy_( + lora_indices_tensor) + self.batch_size = batch_size + self.max_length = max_length + self.token_nums = token_nums + self.no_lora = no_lora + + @property + def prefill_metadata( + self + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int]: + """ + This property provides a convenient way to access the necessary + metadata for prefill-related kernel computations. + 1. seq_start_locs: Tensor of sequence start positions. + 2. seq_lengths: Tensor of sequence lengths. + 3. lora_indices_per_batch: Tensor of lora indices, and an index of + -1 means no lora should be applied. + 4. batch_size: Batch size after clustering identical lora indices. + 5. max_length: The maximum sequence length in the batch. + 6. token_nums: The token numbers in the batch. + """ + return (self._seq_start_locs[:self.batch_size], + self._seq_lengths[:self.batch_size], + self._lora_indices_per_batch[:self.batch_size], + self.batch_size, self.max_length, self.token_nums) + + @property + def token_lora_indices(self) -> torch.Tensor: + """ + This property provides the lora indices corresponding to each token + in the batch. An index of -1 means no lora should be applied. + """ + token_lora_len = self.indices_len[0] + return self._token_lora_indices[:token_lora_len] + + @property + def sampler_indices(self) -> torch.Tensor: + """ + This property is used to access the lora indices specifically for + LogitsProcessorWithLoRA. + """ + sampler_indices_len = self.indices_len[1] + return self._sampler_indices[:sampler_indices_len] + + @property + def sampler_indices_padded(self) -> torch.Tensor: + """ + This property provides access to padded sampler indices. + """ + indices_padded_len = self.indices_len[2] + return self._sampler_indices_padded[:indices_padded_len] + + @property + def embeddings_indices(self) -> torch.Tensor: + """ + This property provides access to the indices used for lora embeddings, + specifically for VocabParallelEmbeddingWithLoRA. + """ + embeddings_indices_len = self.indices_len[3] + return self._embeddings_indices[:, :embeddings_indices_len] + + @property + def long_lora_indices(self) -> torch.Tensor: + """ + This property provides access to the indices used for long context + lora, specifically for LinearScalingRotaryEmbeddingWithLora. 
+ """ + long_lora_len = self.indices_len[4] + return self._long_lora_indices[:long_lora_len] + + def _shrink_prefill( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + scale: float, + ): + #No LoRA request, so return directly + if self.no_lora: + return + sgmv_shrink( + x, + w_t_all, + y, + *self.prefill_metadata, + scale, + ) + + def _shrink_decode( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + scale: float, + ): + bgmv_shrink(x, w_t_all, y, self.token_lora_indices, scale) + + def _expand_prefill( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + add_input: bool, + ): + #No LoRA request, so return directly + if self.no_lora: + return + sgmv_expand( + x, + w_t_all, + y, + *self.prefill_metadata, + add_input, + ) + + def _expand_decode( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + add_input: bool, + ): + bgmv_expand(x, w_t_all, y, self.token_lora_indices, add_input) + + def _expand_slice_prefill( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + y_offset: Optional[int], + y_slice_size: Optional[int], + add_input: bool, + ): + #No LoRA request, so return directly + if self.no_lora: + return + sgmv_expand_slice( + x, + w_t_all, + y, + *self.prefill_metadata, + y_offset, + y_slice_size, + add_input, + ) + + def _expand_slice_decode( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + y_offset: Optional[int], + y_slice_size: Optional[int], + add_input: bool, + ): + bgmv_expand_slice(x, w_t_all, y, self.token_lora_indices, y_offset, + y_slice_size, add_input) + + def _apply_expand(self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + y_offset: Optional[int], + y_slice_size: Optional[int], + add_input: bool = True): + """ + Perform the ` y[:,y_offset:y_offset+y_slice_size]+=x@w_t_all` + computation, which is suitable for the + GEMM of lora'b. + """ + + expand_slice_fun: Callable = (self._expand_slice_prefill + if self.is_prefill else + self._expand_slice_decode) + expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_input) + + def _apply_bias( + self, + indices: torch.Tensor, + output: torch.Tensor, + output_slices: Tuple[int, ...], + lora_bias_stacked: Tuple[Optional[torch.Tensor], ...], + ): + """Applies bias to output + + Input shapes: + lora_bias_stacked: 3 element tuple of (num_loras, output_dim) + indices: (batch_size) + output: (batch_size, q_slice_size + 2*kv_slice_size) + output_slices: n-1 element tuple of (slice_size...), + where n is number of slices + """ + org_output = output + output = output.view(-1, output.shape[-1]) + indices = indices.view(-1) + + offset_left = 0 + for slice_idx, slice in enumerate(output_slices): + bias = lora_bias_stacked[slice_idx] + if bias is not None: + bias = bias.view(-1, bias.shape[-1]) + bias = bias[indices] + bias[indices == -1] = 0 + output[:, offset_left:offset_left + slice] += bias + offset_left += slice + + return output.view_as(org_output) + + def _apply_shrink( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + scale: float, + ): + """ + Perform the ` y+=x@w_t_all` computation, which is suitable for the + GEMM of lora'a. + When `is_prefill is` true, it indicates that it is currently the + prefill stage, and the `_shrink_prefill` function should be called. + Otherwise, it is the decode stage, and the _shrink_decode function + should be called. 
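# Editor's note: a minimal eager-PyTorch sketch of what the shrink/expand pair
# above computes, as a mental model for the sgmv/bgmv kernels. The tensor names,
# sizes, scale and output offset are all made up for illustration.
import torch

num_tokens, hidden, rank, slice_size = 8, 64, 16, 32
x = torch.randn(num_tokens, hidden)
lora_a = torch.randn(hidden, rank)        # one LoRA's A matrix
lora_b = torch.randn(rank, slice_size)    # one LoRA's B matrix
y = torch.zeros(num_tokens, 128)          # full output row, several slices wide
scale, y_offset = 0.5, 32

# "shrink": project the input down to the LoRA rank, scaled, into an fp32 buffer.
buffer = (x @ lora_a) * scale
# "expand_slice": project back up and accumulate into one slice of the output.
y[:, y_offset:y_offset + slice_size] += buffer @ lora_b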
+ """ + y_org = y + y = y.view(-1, y.shape[-1]) + shrink_fun: Callable = (self._shrink_prefill + if self.is_prefill else self._shrink_decode) + shrink_fun(y, x, w_t_all, scale) + y = y.view_as(y_org) + + def add_shrink( + self, + y: Union[Tuple[torch.Tensor, ...], torch.Tensor], + x: torch.Tensor, + lora_a_stacked: Tuple[torch.Tensor, ...], + scale: float, + ): + """ + Performs GEMM for multiple slices of lora_a. + When `is_prefill is` true, it indicates that it is currently the + prefill stage, and the `_shrink_prefill` function should be called. + Otherwise, it is the decode stage, and the _shrink_decode function + should be called. + + Semantics: + for i in range(len(lora_a_stacked)): + y[i] += (x @ lora_a_stacked[i]) * scale + + Args: + y (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Output tensors + x (torch.Tensor): Input tensor + lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weights + scale (float): Scaling factor for the operation + """ + + x = x.view(-1, x.shape[-1]) + # TODO fuse these kernels + for slice_idx in range(len(lora_a_stacked)): + self._apply_shrink(y[slice_idx], x, lora_a_stacked[slice_idx], + scale) + + def add_expand( + self, + y: torch.Tensor, + x: Union[Tuple[torch.Tensor, ...], torch.Tensor], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + output_slices: Tuple[int, ...], + offset_start: int = 0, + add_input=True, + ) -> None: + """ + Performs GEMM and bias addition for multiple slices of lora_b. + + Semantics: + for i in range(len(lora_b_stacked)): + slice = output_slices[i] + y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] + + lora_bias_stacked[i] + offset += slice + + Args: + y (torch.Tensor): Output tensor. + x (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Input tensors + lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight + lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): + bias's weight + output_slices (Tuple[int, ...]): Every slice's size + add_input (bool): Defaults to True. + """ + y_org = y + y = y.view(-1, y.shape[-1]) + offset_left = offset_start + if lora_bias_stacked is not None: + self._apply_bias(self.token_lora_indices, y, output_slices, + lora_bias_stacked) + for slice_idx in range(len(lora_b_stacked)): + self._apply_expand( + y, + x[slice_idx], + lora_b_stacked[slice_idx], + offset_left, + output_slices[slice_idx], + add_input=add_input, + ) + offset_left += output_slices[slice_idx] + y = y.view_as(y_org) + + def add_lora_embedding( + self, + y: torch.Tensor, + x: torch.Tensor, + lora_b_stacked: torch.Tensor, + add_input: bool = True, + ): + """ + Applies lora specifically for VocabParallelEmbeddingWithLoRA. + + Semantics: + y += x @ lora_b_stacked + + Args: + y (torch.Tensor): Output tensor. + x (torch.Tensor): Input tensor. + lora_b_stacked (torch.Tensor): lora_b's weights. + add_input (bool): Default to True. + + """ + + # Embedding layer only need expand op + expand_fun: Callable = (self._expand_prefill + if self.is_prefill else self._expand_decode) + expand_fun(y, x, lora_b_stacked, add_input) + + def add_lora_linear( + self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: Tuple[torch.Tensor, ...], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + scale: float, + output_slices: Tuple[int, ...], + *, + buffer: Optional[Tuple[torch.Tensor, ...]] = None) -> None: + """ + Applicable to linear-related lora. 
+ + Semantics: + for i in range(len(lora_a_stacked)): + y[i] += ( + x[i].unsqueeze(0) + @ lora_a_stacked[indices[i], layer_idx, :, :] + @ lora_b_stacked[indices[i], layer_idx, :, :] + * scale + ).squeeze(0)+lora_bias_stacked[i] + + Args: + y (torch.Tensor): Output tensor. Will be changed in-place. + x (torch.Tensor): Input tensor + lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weight. + lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight. + lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): lora's bias. + scale (float): Scaling factor. + output_slices (Tuple[int, ...]): Every slice's size. + buffer (Optional[Tuple[torch.Tensor, ...]]): Defaults to None. + """ + + assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices) + if lora_bias_stacked is not None: + assert len(lora_bias_stacked) == len(output_slices) + y = self._apply_bias(self.token_lora_indices, y, output_slices, + lora_bias_stacked) + + if buffer is None: + r = lora_b_stacked[0].size(-1) + # We set the buffer to be float32 by default ,refer to: + # https://github.com/triton-lang/triton/issues/1387 + buffer = tuple( + torch.zeros( + (x.size(0), r), dtype=torch.float32, device=x.device) + for _ in range(len(output_slices))) + self.add_shrink(buffer, x, lora_a_stacked, scale) + self.add_expand(y, + buffer, + lora_b_stacked, + None, + output_slices, + add_input=True) + + def add_lora_logits(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: torch.Tensor, + lora_b_stacked: torch.Tensor, + scale, + *, + buffer: Optional[torch.Tensor] = None) -> None: + """ + Applies lora specifically for LogitsProcessorWithLoRA. + + Semantics: + buffer = (x @ lora_a_stacked) * scale + y += buffer @ lora_b_stacked + + Args: + y (torch.Tensor): Output tensor. + x (torch.Tensor): Input tensor. + lora_a_stacked (torch.Tensor): lora_a's weights. + lora_b_stacked (torch.Tensor):lora_b's weights. + scale (float): Scaling factor. + buffer (Optional[torch.Tensor]):Default to None. + """ + y_org = y + y = y.view(-1, y.shape[-1]) + x = x.view(-1, x.shape[-1]) + r = lora_b_stacked.size(-1) + if buffer is None: + # We set the buffer to be float32 by default ,refer to: + # https://github.com/triton-lang/triton/issues/1387 + buffer = torch.zeros((x.size(0), r), + dtype=torch.float32, + device=x.device) + # LogitsProcessorWithLoRA always using bgmv. + bgmv_shrink(x, lora_a_stacked, buffer, self.sampler_indices, scale) + bgmv_expand(buffer, + lora_b_stacked, + y, + self.sampler_indices, + add_inputs=True) + y = y.view_as(y_org) diff --git a/vllm/lora/punica_wrapper/__init__.py b/vllm/lora/punica_wrapper/__init__.py deleted file mode 100644 index 48ada3926ea46..0000000000000 --- a/vllm/lora/punica_wrapper/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from vllm.lora.punica_wrapper.punica_base import PunicaWrapperBase -from vllm.lora.punica_wrapper.punica_selector import get_punica_wrapper - -__all__ = [ - "PunicaWrapperBase", - "get_punica_wrapper", -] diff --git a/vllm/lora/punica_wrapper/punica_base.py b/vllm/lora/punica_wrapper/punica_base.py deleted file mode 100644 index 0a5a84bdd8deb..0000000000000 --- a/vllm/lora/punica_wrapper/punica_base.py +++ /dev/null @@ -1,480 +0,0 @@ -""" -Based on: -Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023). -Punica: Multi-Tenant LoRA Serving. 
-https://arxiv.org/abs/2310.18547 -""" - -from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, List, Optional, Tuple, Union - -import torch - -from .utils import compute_meta, convert_mapping - -if TYPE_CHECKING: - # avoid circuit import - from vllm.lora.layers import LoRAMapping - from vllm.lora.models import LongContextLoRAContext - - -class PunicaWrapperABC(ABC): - """ - PunicaWrapper ABC. - """ - - @abstractmethod - def update_metadata( - self, - mapping: "LoRAMapping", - lora_index_to_id: List[Optional[int]], - max_loras: int, - vocab_size: int, - extra_vocab_size: int, - long_lora_context: Optional["LongContextLoRAContext"] = None, - **kwargs, - ) -> None: - """ - Update the lora-related metadata - """ - raise NotImplementedError - - @abstractmethod - def add_shrink( - self, - y: Union[Tuple[torch.Tensor, ...], torch.Tensor], - x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, ...], - scale: float, - **kwargs, - ) -> None: - """ - Performs GEMM for multiple slices of lora_a. - """ - - raise NotImplementedError - - @abstractmethod - def add_expand( - self, - y: torch.Tensor, - x: Union[Tuple[torch.Tensor, ...], torch.Tensor], - lora_b_stacked: Tuple[torch.Tensor, ...], - lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], - output_slices: Tuple[int, ...], - offset_start: int = 0, - add_input=True, - **kwargs, - ) -> None: - """ - Performs GEMM and bias addition for multiple slices of lora_b. - """ - raise NotImplementedError - - @abstractmethod - def add_lora_embedding( - self, - y: torch.Tensor, - x: torch.Tensor, - lora_b_stacked: torch.Tensor, - add_input: bool = True, - **kwargs, - ) -> None: - """ - Applies lora specifically for VocabParallelEmbeddingWithLoRA, - and this layer only requires the expand operation. - """ - raise NotImplementedError - - @abstractmethod - def add_lora_linear(self, - y: torch.Tensor, - x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, ...], - lora_b_stacked: Tuple[torch.Tensor, ...], - lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], - scale: float, - output_slices: Tuple[int, ...], - *, - buffer: Optional[Tuple[torch.Tensor, ...]] = None, - **kwargs) -> None: - """ - Applicable to linear-related lora. - """ - - raise NotImplementedError - - @abstractmethod - def add_lora_logits(self, - y: torch.Tensor, - x: torch.Tensor, - lora_a_stacked: torch.Tensor, - lora_b_stacked: torch.Tensor, - scale, - *, - buffer: Optional[torch.Tensor] = None, - **kwargs) -> None: - """ - Applies lora specifically for LogitsProcessorWithLoRA. - """ - raise NotImplementedError - - -class PunicaWrapperBase(PunicaWrapperABC): - """ - PunicaWrapperBase is designed to manage and provide metadata for the punica - kernel. The main function is to maintain the state information for - Multi-LoRA, and to provide the interface for the punica. - """ - - def __init__(self, max_num_batched_tokens: int, max_batches: int, - device: Union[torch.device, str], **kwargs): - self._token_lora_indices = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - self._sampler_indices = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - self._sampler_indices_padded = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - self._embeddings_indices = torch.empty(2, - max_num_batched_tokens, - dtype=torch.long, - device=device) - self._long_lora_indices = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - - # 5 is the number of indicies tensors. 
- # base_indices, sampler_indices, sampler_indices_padded, - # embeddings_indices,long_lora_indices - self.indices_len: List[Optional[int]] = [None] * 5 - # these attributes are the information required for sgmv kernel - self._seq_start_locs = torch.empty(max_batches, - dtype=torch.long, - device=device) - self._seq_lengths = torch.empty(max_batches, - dtype=torch.long, - device=device) - self._lora_indices_per_batch = torch.empty(max_batches, - dtype=torch.long, - device=device) - self.device: torch.device = device - self.max_length: int = 0 - self.token_nums: int = 0 - self.batch_size: int = -1 - self.is_prefill = False - self.no_lora = False - - def _update_base_metadata( - self, - mapping: "LoRAMapping", - lora_index_to_id: List[Optional[int]], - max_loras: int, - vocab_size: int, - extra_vocab_size: int, - long_lora_context: Optional["LongContextLoRAContext"] = None, - ): - ( - base_indices, - sampler_indices, - sampler_indices_padded, - embeddings_indices, - long_lora_offsets_tensor, - indices_len, - ) = convert_mapping( - mapping, - lora_index_to_id, - max_loras, - vocab_size, - extra_vocab_size, - self.device, - long_lora_context, - ) - self._token_lora_indices[:base_indices.shape[0]].copy_(base_indices) - self._sampler_indices[:sampler_indices.shape[0]].copy_(sampler_indices) - self._sampler_indices_padded[:sampler_indices_padded.shape[0]].copy_( - sampler_indices_padded) - self._embeddings_indices[:embeddings_indices. - shape[0], :embeddings_indices.shape[1]].copy_( - embeddings_indices) - if long_lora_offsets_tensor is not None: - self._long_lora_indices[:long_lora_offsets_tensor.shape[0]].copy_( - long_lora_offsets_tensor) - else: - self._long_lora_indices.zero_() - self.indices_len[:] = indices_len - - def _update_prefill_metada(self, token_lora_tensor: torch.Tensor) -> None: - - (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, - batch_size, max_length, token_nums, - no_lora) = compute_meta(token_lora_tensor) - - self._seq_start_locs[:b_seq_start_tensor.shape[0]].copy_( - b_seq_start_tensor) - self._seq_lengths[:seq_length_tensor.shape[0]].copy_(seq_length_tensor) - self._lora_indices_per_batch[:lora_indices_tensor.shape[0]].copy_( - lora_indices_tensor) - self.batch_size = batch_size - self.max_length = max_length - self.token_nums = token_nums - self.no_lora = no_lora - - def _apply_bias( - self, - indices: torch.Tensor, - output: torch.Tensor, - output_slices: Tuple[int, ...], - lora_bias_stacked: Tuple[Optional[torch.Tensor], ...], - ): - """Applies bias to output - - Input shapes: - lora_bias_stacked: 3 element tuple of (num_loras, output_dim) - indices: (batch_size) - output: (batch_size, q_slice_size + 2*kv_slice_size) - output_slices: n-1 element tuple of (slice_size...), - where n is number of slices - """ - org_output = output - output = output.view(-1, output.shape[-1]) - indices = indices.view(-1) - - offset_left = 0 - for slice_idx, slice in enumerate(output_slices): - bias = lora_bias_stacked[slice_idx] - if bias is not None: - bias = bias.view(-1, bias.shape[-1]) - bias = bias[indices] - bias[indices == -1] = 0 - output[:, offset_left:offset_left + slice] += bias - offset_left += slice - - return output.view_as(org_output) - - @property - def prefill_metadata( - self - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int]: - """ - This property provides a convenient way to access the necessary - metadata for prefill-related kernel computations. - 1. seq_start_locs: Tensor of sequence start positions. - 2. 
seq_lengths: Tensor of sequence lengths. - 3. lora_indices_per_batch: Tensor of lora indices, and an index of - -1 means no lora should be applied. - 4. batch_size: Batch size after clustering identical lora indices. - 5. max_length: The maximum sequence length in the batch. - 6. token_nums: The token numbers in the batch. - """ - return (self._seq_start_locs[:self.batch_size], - self._seq_lengths[:self.batch_size], - self._lora_indices_per_batch[:self.batch_size], - self.batch_size, self.max_length, self.token_nums) - - @property - def token_lora_indices(self) -> torch.Tensor: - """ - This property provides the lora indices corresponding to each token - in the batch. An index of -1 means no lora should be applied. - """ - token_lora_len = self.indices_len[0] - return self._token_lora_indices[:token_lora_len] - - @property - def sampler_indices(self) -> torch.Tensor: - """ - This property is used to access the lora indices specifically for - LogitsProcessorWithLoRA. - """ - sampler_indices_len = self.indices_len[1] - return self._sampler_indices[:sampler_indices_len] - - @property - def sampler_indices_padded(self) -> torch.Tensor: - """ - This property provides access to padded sampler indices. - """ - indices_padded_len = self.indices_len[2] - return self._sampler_indices_padded[:indices_padded_len] - - @property - def embeddings_indices(self) -> torch.Tensor: - """ - This property provides access to the indices used for lora embeddings, - specifically for VocabParallelEmbeddingWithLoRA. - """ - embeddings_indices_len = self.indices_len[3] - return self._embeddings_indices[:, :embeddings_indices_len] - - @property - def long_lora_indices(self) -> torch.Tensor: - """ - This property provides access to the indices used for long context - lora, specifically for LinearScalingRotaryEmbeddingWithLora. - """ - long_lora_len = self.indices_len[4] - return self._long_lora_indices[:long_lora_len] - - def update_metadata( - self, - mapping: "LoRAMapping", - lora_index_to_id: List[Optional[int]], - max_loras: int, - vocab_size: int, - extra_vocab_size: int, - long_lora_context: Optional["LongContextLoRAContext"] = None, - **kwargs): - - self._update_base_metadata(mapping, lora_index_to_id, max_loras, - vocab_size, extra_vocab_size, - long_lora_context) - if mapping.is_prefill: - # Update metadata required for prefill-related operators. - self._update_prefill_metada(self.token_lora_indices) - self.is_prefill = True - else: - self.is_prefill = False - - @abstractmethod - def add_shrink(self, y: Union[Tuple[torch.Tensor, ...], torch.Tensor], - x: torch.Tensor, lora_a_stacked: Tuple[torch.Tensor, ...], - scale: float, **kwargs) -> None: - """ - Performs GEMM for multiple slices of lora_a. - - Semantics: - for i in range(len(lora_a_stacked)): - y[i] += (x @ lora_a_stacked[i]) * scale - - Args: - y (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Output tensors - x (torch.Tensor): Input tensor - lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weights - scale (float): Scaling factor for the operation - - """ - # TODO: implement it based on torch ops - raise NotImplementedError - - @abstractmethod - def add_expand(self, - y: torch.Tensor, - x: Union[Tuple[torch.Tensor, ...], torch.Tensor], - lora_b_stacked: Tuple[torch.Tensor, ...], - lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], - output_slices: Tuple[int, ...], - offset_start: int = 0, - add_input=True, - **kwargs) -> None: - """ - Performs GEMM and bias addition for multiple slices of lora_b. 
- - Semantics: - for i in range(len(lora_b_stacked)): - slice = output_slices[i] - y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] + - lora_bias_stacked[i] - offset += slice - - Args: - y (torch.Tensor): Output tensor. - x (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Input tensors - lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight - lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): - bias's weight - output_slices (Tuple[int, ...]): Every slice's size - add_input (bool): Defaults to True. - - """ - # TODO: implement it based on torch ops - raise NotImplementedError - - @abstractmethod - def add_lora_embedding(self, - y: torch.Tensor, - x: torch.Tensor, - lora_b_stacked: torch.Tensor, - add_input: bool = True, - **kwargs) -> None: - """ - Applies lora specifically for VocabParallelEmbeddingWithLoRA. - and this layer only requires the expand operation. - Semantics: - y += x @ lora_b_stacked - - Args: - y (torch.Tensor): Output tensor. - x (torch.Tensor): Input tensor. - lora_b_stacked (torch.Tensor): lora_b's weights. - add_input (bool): Default to True. - """ - # TODO: implement it based on torch ops - raise NotImplementedError - - @abstractmethod - def add_lora_linear(self, - y: torch.Tensor, - x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, ...], - lora_b_stacked: Tuple[torch.Tensor, ...], - lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], - scale: float, - output_slices: Tuple[int, ...], - *, - buffer: Optional[Tuple[torch.Tensor, ...]] = None, - **kwargs) -> None: - """ - Applicable to linear-related lora. - - Semantics: - for i in range(len(lora_a_stacked)): - y[i] += ( - x[i].unsqueeze(0) - @ lora_a_stacked[indices[i], layer_idx, :, :] - @ lora_b_stacked[indices[i], layer_idx, :, :] - * scale - ).squeeze(0)+lora_bias_stacked[i] - - Args: - y (torch.Tensor): Output tensor. Will be changed in-place. - x (torch.Tensor): Input tensor - lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weight. - lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight. - lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): lora's bias. - scale (float): Scaling factor. - output_slices (Tuple[int, ...]): Every slice's size. - buffer (Optional[Tuple[torch.Tensor, ...]]): Defaults to None. - """ - # TODO: implement it based on torch ops - raise NotImplementedError - - @abstractmethod - def add_lora_logits(self, - y: torch.Tensor, - x: torch.Tensor, - lora_a_stacked: torch.Tensor, - lora_b_stacked: torch.Tensor, - scale, - *, - buffer: Optional[torch.Tensor] = None, - **kwargs) -> None: - """ - Applies lora specifically for LogitsProcessorWithLoRA. - - Semantics: - buffer = (x @ lora_a_stacked) * scale - y += buffer @ lora_b_stacked - - Args: - y (torch.Tensor): Output tensor. - x (torch.Tensor): Input tensor. - lora_a_stacked (torch.Tensor): lora_a's weights. - lora_b_stacked (torch.Tensor):lora_b's weights. - scale (float): Scaling factor. - buffer (Optional[torch.Tensor]):Default to None. - """ - # TODO: implement it based on torch ops - raise NotImplementedError diff --git a/vllm/lora/punica_wrapper/punica_gpu.py b/vllm/lora/punica_wrapper/punica_gpu.py deleted file mode 100644 index b2af29de129ce..0000000000000 --- a/vllm/lora/punica_wrapper/punica_gpu.py +++ /dev/null @@ -1,358 +0,0 @@ -""" -Based on: -Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023). -Punica: Multi-Tenant LoRA Serving. 
-https://arxiv.org/abs/2310.18547 -""" - -from typing import Callable, Optional, Tuple, Union, final - -import torch - -from vllm.triton_utils import HAS_TRITON - -if HAS_TRITON: - from vllm.lora.ops.bgmv_expand import bgmv_expand - from vllm.lora.ops.bgmv_expand_slice import bgmv_expand_slice - from vllm.lora.ops.bgmv_shrink import bgmv_shrink - from vllm.lora.ops.sgmv_expand import sgmv_expand - from vllm.lora.ops.sgmv_expand_slice import sgmv_expand_slice - from vllm.lora.ops.sgmv_shrink import sgmv_shrink - -from .punica_base import PunicaWrapperBase - - -@final -class PunicaWrapperGPU(PunicaWrapperBase): - """ - PunicaWrapperGPU is designed to manage and provide metadata for the punica - kernel. The main function is to maintain the state information for - Multi-LoRA, and to provide the interface for the punica triton kernel. - """ - - def __init__(self, max_num_batched_tokens: int, max_batches: int, - device: Union[torch.device, str], **kwargs): - PunicaWrapperBase.__init__(self, max_num_batched_tokens, max_batches, - device) - - def _shrink_prefill( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - scale: float, - ): - #No LoRA request, so return directly - if self.no_lora: - return - sgmv_shrink( - x, - w_t_all, - y, - *self.prefill_metadata, - scale, - ) - - def _shrink_decode( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - scale: float, - ): - bgmv_shrink(x, w_t_all, y, self.token_lora_indices, scale) - - def _expand_prefill( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - add_input: bool, - ): - #No LoRA request, so return directly - if self.no_lora: - return - sgmv_expand( - x, - w_t_all, - y, - *self.prefill_metadata, - add_input, - ) - - def _expand_decode( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - add_input: bool, - ): - bgmv_expand(x, w_t_all, y, self.token_lora_indices, add_input) - - def _expand_slice_prefill( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - y_offset: Optional[int], - y_slice_size: Optional[int], - add_input: bool, - ): - #No LoRA request, so return directly - if self.no_lora: - return - sgmv_expand_slice( - x, - w_t_all, - y, - *self.prefill_metadata, - y_offset, - y_slice_size, - add_input, - ) - - def _expand_slice_decode( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - y_offset: Optional[int], - y_slice_size: Optional[int], - add_input: bool, - ): - bgmv_expand_slice(x, w_t_all, y, self.token_lora_indices, y_offset, - y_slice_size, add_input) - - def _apply_expand( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - y_offset: Optional[int], - y_slice_size: Optional[int], - add_input: bool = True, - ): - """ - Perform the ` y[:,y_offset:y_offset+y_slice_size]+=x@w_t_all` - computation, which is suitable for the - GEMM of lora'b. - """ - - expand_slice_fun: Callable = (self._expand_slice_prefill - if self.is_prefill else - self._expand_slice_decode) - expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_input) - - def _apply_shrink(self, y: torch.Tensor, x: torch.Tensor, - w_t_all: torch.Tensor, scale: float): - """ - Perform the ` y+=x@w_t_all` computation, which is suitable for the - GEMM of lora'a. - When `is_prefill is` true, it indicates that it is currently the - prefill stage, and the `_shrink_prefill` function should be called. - Otherwise, it is the decode stage, and the _shrink_decode function - should be called. 
- """ - y_org = y - y = y.view(-1, y.shape[-1]) - shrink_fun: Callable = (self._shrink_prefill - if self.is_prefill else self._shrink_decode) - shrink_fun(y, x, w_t_all, scale) - y = y.view_as(y_org) - - def add_shrink(self, y: Union[Tuple[torch.Tensor, ...], torch.Tensor], - x: torch.Tensor, lora_a_stacked: Tuple[torch.Tensor, ...], - scale: float, **kwargs): - """ - Performs GEMM for multiple slices of lora_a. - When `is_prefill is` true, it indicates that it is currently the - prefill stage, and the `_shrink_prefill` function should be called. - Otherwise, it is the decode stage, and the _shrink_decode function - should be called. - - Semantics: - for i in range(len(lora_a_stacked)): - y[i] += (x @ lora_a_stacked[i]) * scale - - Args: - y (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Output tensors - x (torch.Tensor): Input tensor - lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weights - scale (float): Scaling factor for the operation - """ - - x = x.view(-1, x.shape[-1]) - # TODO fuse these kernels - for slice_idx in range(len(lora_a_stacked)): - self._apply_shrink(y[slice_idx], x, lora_a_stacked[slice_idx], - scale) - - def add_expand(self, - y: torch.Tensor, - x: Union[Tuple[torch.Tensor, ...], torch.Tensor], - lora_b_stacked: Tuple[torch.Tensor, ...], - lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], - output_slices: Tuple[int, ...], - offset_start: int = 0, - add_input=True, - **kwargs) -> None: - """ - Performs GEMM and bias addition for multiple slices of lora_b. - - Semantics: - for i in range(len(lora_b_stacked)): - slice = output_slices[i] - y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] + - lora_bias_stacked[i] - offset += slice - - Args: - y (torch.Tensor): Output tensor. - x (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Input tensors - lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight - lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): - bias's weight - output_slices (Tuple[int, ...]): Every slice's size - add_input (bool): Defaults to True. - """ - y_org = y - y = y.view(-1, y.shape[-1]) - offset_left = offset_start - if lora_bias_stacked is not None: - self._apply_bias(self.token_lora_indices, y, output_slices, - lora_bias_stacked) - for slice_idx in range(len(lora_b_stacked)): - self._apply_expand( - y, - x[slice_idx], - lora_b_stacked[slice_idx], - offset_left, - output_slices[slice_idx], - add_input=add_input, - ) - offset_left += output_slices[slice_idx] - y = y.view_as(y_org) - - def add_lora_embedding(self, - y: torch.Tensor, - x: torch.Tensor, - lora_b_stacked: torch.Tensor, - add_input: bool = True, - **kwargs) -> None: - """ - Applies lora specifically for VocabParallelEmbeddingWithLoRA. - - Semantics: - y += x @ lora_b_stacked - - Args: - y (torch.Tensor): Output tensor. - x (torch.Tensor): Input tensor. - lora_b_stacked (torch.Tensor): lora_b's weights. - add_input (bool): Default to True. - """ - - # Embedding layer only need expand op - expand_fun: Callable = (self._expand_prefill - if self.is_prefill else self._expand_decode) - expand_fun(y, x, lora_b_stacked, add_input) - - def add_lora_linear(self, - y: torch.Tensor, - x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, ...], - lora_b_stacked: Tuple[torch.Tensor, ...], - lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], - scale: float, - output_slices: Tuple[int, ...], - *, - buffer: Optional[Tuple[torch.Tensor, ...]] = None, - **kwargs) -> None: - """ - Applicable to linear-related lora. 
- - Semantics: - for i in range(len(lora_a_stacked)): - y[i] += ( - x[i].unsqueeze(0) - @ lora_a_stacked[indices[i], layer_idx, :, :] - @ lora_b_stacked[indices[i], layer_idx, :, :] - * scale - ).squeeze(0)+lora_bias_stacked[i] - - Args: - y (torch.Tensor): Output tensor. Will be changed in-place. - x (torch.Tensor): Input tensor - lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weight. - lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight. - lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): lora's bias. - scale (float): Scaling factor. - output_slices (Tuple[int, ...]): Every slice's size. - buffer (Optional[Tuple[torch.Tensor, ...]]): Defaults to None. - """ - - assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices) - if lora_bias_stacked is not None: - assert len(lora_bias_stacked) == len(output_slices) - y = self._apply_bias(self.token_lora_indices, y, output_slices, - lora_bias_stacked) - - if buffer is None: - r = lora_b_stacked[0].size(-1) - # We set the buffer to be float32 by default ,refer to: - # https://github.com/triton-lang/triton/issues/1387 - buffer = tuple( - torch.zeros( - (x.size(0), r), dtype=torch.float32, device=x.device) - for _ in range(len(output_slices))) - self.add_shrink(buffer, x, lora_a_stacked, scale, **kwargs) - self.add_expand(y, - buffer, - lora_b_stacked, - None, - output_slices, - add_input=True, - **kwargs) - - def add_lora_logits(self, - y: torch.Tensor, - x: torch.Tensor, - lora_a_stacked: torch.Tensor, - lora_b_stacked: torch.Tensor, - scale, - *, - buffer: Optional[torch.Tensor] = None, - **kwargs) -> None: - """ - Applies lora specifically for LogitsProcessorWithLoRA. - - Semantics: - buffer = (x @ lora_a_stacked) * scale - y += buffer @ lora_b_stacked - - Args: - y (torch.Tensor): Output tensor. - x (torch.Tensor): Input tensor. - lora_a_stacked (torch.Tensor): lora_a's weights. - lora_b_stacked (torch.Tensor):lora_b's weights. - scale (float): Scaling factor. - buffer (Optional[torch.Tensor]):Default to None. - """ - y_org = y - y = y.view(-1, y.shape[-1]) - x = x.view(-1, x.shape[-1]) - r = lora_b_stacked.size(-1) - if buffer is None: - # We set the buffer to be float32 by default ,refer to: - # https://github.com/triton-lang/triton/issues/1387 - buffer = torch.zeros((x.size(0), r), - dtype=torch.float32, - device=x.device) - # LogitsProcessorWithLoRA always using bgmv. - bgmv_shrink(x, lora_a_stacked, buffer, self.sampler_indices, scale) - bgmv_expand(buffer, - lora_b_stacked, - y, - self.sampler_indices, - add_inputs=True) - y = y.view_as(y_org) diff --git a/vllm/lora/punica_wrapper/punica_hpu.py b/vllm/lora/punica_wrapper/punica_hpu.py deleted file mode 100644 index b5ef4f2c41dbb..0000000000000 --- a/vllm/lora/punica_wrapper/punica_hpu.py +++ /dev/null @@ -1,87 +0,0 @@ -from typing import Optional, Tuple, Union, final - -import torch -from vllm_hpu_extension.ops import (dispatch_bgmv_embedding, - dispatch_bgmv_linear) - -from .punica_base import PunicaWrapperBase - - -@final -class PunicaWrapperHPU(PunicaWrapperBase): - - def __init__(self, max_num_batched_tokens: int, max_batches: int, - device: Union[torch.device, str], **kwargs): - # Increasing max_num_batched_tokens by 3x to handle increase in - # tensor size due to padding. 
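# Editor's note: a small reference for the bgmv-style paths above (for example
# add_lora_logits): every token selects its own LoRA from a stacked weight
# tensor via an index, and -1 means "no LoRA". The weight layout and names
# below are illustrative only, not the actual kernel or storage layout.
import torch

num_loras, hidden, rank, num_tokens = 2, 32, 8, 5
lora_a_stacked = torch.randn(num_loras, hidden, rank)
x = torch.randn(num_tokens, hidden)
token_lora_indices = torch.tensor([0, 0, 1, -1, 1])

out = torch.zeros(num_tokens, rank)
for t, idx in enumerate(token_lora_indices.tolist()):
    if idx == -1:                 # no LoRA requested for this token
        continue
    out[t] = x[t] @ lora_a_stacked[idx]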
- PunicaWrapperBase.__init__(self, 3 * max_num_batched_tokens, - max_batches, device) - - def add_lora_embedding(self, - y: torch.Tensor, - x: torch.Tensor, - lora_b_stacked: torch.Tensor, - add_input: bool = True, - **kwargs) -> None: - dispatch_bgmv_embedding(y, x, lora_b_stacked, 0) - - def add_lora_linear(self, - y: torch.Tensor, - x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, ...], - lora_b_stacked: Tuple[torch.Tensor, ...], - lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], - scale: float, - output_slices: Tuple[int, ...], - *, - buffer: Optional[Tuple[torch.Tensor, ...]] = None, - **kwargs) -> None: - y_org = y - x = x.view(-1, x.shape[-1]) - y = y.view(-1, y.shape[-1]) - offset_left = 0 - - for slice_idx in range(len(output_slices)): - dispatch_bgmv_linear( - y[:, offset_left:offset_left + output_slices[slice_idx]], x, - lora_a_stacked[slice_idx], lora_b_stacked[slice_idx], 0, scale) - offset_left += output_slices[slice_idx] - y = y.view_as(y_org) - - def add_lora_logits(self, - y: torch.Tensor, - x: torch.Tensor, - lora_a_stacked: torch.Tensor, - lora_b_stacked: torch.Tensor, - scale, - *, - buffer: Optional[torch.Tensor] = None, - **kwargs) -> None: - y_org = y - y = y.view(-1, y.shape[-1]) - x = x.view(-1, x.shape[-1]) - dispatch_bgmv_linear(y, x, lora_a_stacked, lora_b_stacked, 0, scale) - y = y.view_as(y_org) - - def add_shrink( - self, - y: Union[Tuple[torch.Tensor, ...], torch.Tensor], - x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, ...], - scale: float, - **kwargs, - ) -> None: - raise NotImplementedError - - def add_expand( - self, - y: torch.Tensor, - x: Union[Tuple[torch.Tensor, ...], torch.Tensor], - lora_b_stacked: Tuple[torch.Tensor, ...], - lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], - output_slices: Tuple[int, ...], - offset_start: int = 0, - add_input=True, - **kwargs, - ) -> None: - raise NotImplementedError \ No newline at end of file diff --git a/vllm/lora/punica_wrapper/punica_selector.py b/vllm/lora/punica_wrapper/punica_selector.py deleted file mode 100644 index cd64878d95ae3..0000000000000 --- a/vllm/lora/punica_wrapper/punica_selector.py +++ /dev/null @@ -1,19 +0,0 @@ -from vllm.platforms import current_platform -from vllm.utils import print_info_once - -from .punica_base import PunicaWrapperBase - - -def get_punica_wrapper(*args, **kwargs) -> PunicaWrapperBase: - if current_platform.is_cuda_alike(): - # Lazy import to avoid ImportError - from vllm.lora.punica_wrapper.punica_gpu import PunicaWrapperGPU - print_info_once("Using PunicaWrapperGPU.") - return PunicaWrapperGPU(*args, **kwargs) - elif current_platform.is_hpu(): - # Lazy import to avoid ImportError - from vllm.lora.punica_wrapper.punica_hpu import PunicaWrapperHPU - print_info_once("Using PunicaWrapperHPU.") - return PunicaWrapperHPU(*args, **kwargs) - else: - raise NotImplementedError diff --git a/vllm/lora/punica_wrapper/utils.py b/vllm/lora/punica_wrapper/utils.py deleted file mode 100644 index 7360c8c09e3ac..0000000000000 --- a/vllm/lora/punica_wrapper/utils.py +++ /dev/null @@ -1,159 +0,0 @@ -from typing import TYPE_CHECKING, List, Optional, Tuple, Union - -import torch - -if TYPE_CHECKING: - # avoid circuit import - from vllm.lora.layers import LoRAMapping - from vllm.lora.models import LongContextLoRAContext - - -def compute_meta( - token_lora_tensor: torch.Tensor -) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int, bool]: - """ - Get the information required for the sgmv kernel. With the features: - 1. 
If consecutive requests in the batch use the same LoRA, this function - will combine them into a single request, improving sgmv kernel inference - performance. - 2. At the beginning of each prefill stage inference, recalculations are - needed based on the input, but only once. - """ - - lora_indices_tensor, seq_length_tensor = torch.unique_consecutive( - token_lora_tensor, return_counts=True) - cum_result = torch.cumsum(seq_length_tensor, dim=0) - b_seq_start_tensor = torch.zeros_like(seq_length_tensor) - b_seq_start_tensor[1:].copy_(cum_result[:-1]) - max_length = seq_length_tensor.max().item() - token_nums = seq_length_tensor.sum().item() - batch_size = lora_indices_tensor.size(0) - no_lora = False - # -1 means no lora should be applied. Use `no_lora` to determine whether - # the current step requires LoRA. If LoRA is not needed, the prefill stage - # does not need to launch the triton kernel, which can improve performance - if batch_size == 1 and lora_indices_tensor == -1: - no_lora = True - return (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, - batch_size, max_length, token_nums, no_lora) - - -# TODO see if this can be vectorized -def convert_mapping( - mapping: "LoRAMapping", - lora_index_to_id: List[Optional[int]], - max_loras: int, - vocab_size: int, - extra_vocab_size: int, - device: torch.device, - long_lora_context: Optional["LongContextLoRAContext"] = None, -) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, - Optional[torch.Tensor], List[int]]: - """Converts LoRAMapping to index tensors. - - Args: - mapping: LoRAMapping mapping rows in a batch to LoRA ids. - lora_index_to_id: List mapping LoRA ids to LoRA indices. - max_loras: Maximum number of LoRAs. - vocab_size: Model vocab size. - extra_vocab_size: Extra vocab size each LoRA can have. - long_lora_context: Passed if there are long context lora in a batch. - - Returns: - A tuple of tensors: - base_indices: Tensor of shape [batch_size] mapping batch rows to - LoRA indices. - sampler_indices: Tensor of shape [batch_size] mapping requests to - LoRA indices for sampler. For generation, this will be the - same as base_indicies. For prefill, this will map requests - to LoRA indices. - sampler_indices_padded: Tensor of shape [batch_size] mapping - requests to LoRA indices for sampler with padding. - Same as sampler_indicies, but -1 is replaced with - max_loras. - embeddings_indices: Tensor of shape [2, batch_size] mapping - requests to embedding indices. First row is for embeddings - added by the LoRAs, second row is for the LoRA.lora_a - embeddings. - long_lora_indices: Tensor of shape [batch_size] mapping - requests to RoPE offsets and rot dims for long LoRAs. - None if long context lora doesn't exist. - indices_len: List of lengths of the above tensors. It contains - (base_indices, sampler_indices, sampler_indices_padded, - embeddings_indices, long_lora_indices). - """ - index_mapping_indices: List[int] = list(mapping.index_mapping).copy() - embedding_indices = index_mapping_indices.copy() - lora_indices = index_mapping_indices.copy() - long_lora_offsets: Optional[torch.Tensor] = None - if long_lora_context: - long_lora_offsets = torch.zeros(len(index_mapping_indices), - device=device, - dtype=torch.long) - prompt_mapping: List[int] = [ - lora_index_to_id.index(x) if x > 0 else -1 - for x in mapping.prompt_mapping - ] - lora_idx = None - for i in range(len(index_mapping_indices)): - # TODO index can be slow. 
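# Editor's note: a quick illustration of the grouping trick compute_meta uses
# above: torch.unique_consecutive collapses runs of tokens that share a LoRA id
# into one sgmv batch entry. The index values below are made up.
import torch

token_lora = torch.tensor([0, 0, 0, 1, 1, -1, -1, -1])
lora_ids, seq_lens = torch.unique_consecutive(token_lora, return_counts=True)
seq_starts = torch.zeros_like(seq_lens)
seq_starts[1:] = torch.cumsum(seq_lens, dim=0)[:-1]
# lora_ids   -> tensor([ 0,  1, -1])
# seq_lens   -> tensor([3, 2, 3])
# seq_starts -> tensor([0, 3, 5])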
optimize - lora_idx = (lora_index_to_id.index(index_mapping_indices[i]) - if index_mapping_indices[i] > 0 else -1) - embedding_indices[i] = lora_idx if index_mapping_indices[i] > 0 else 0 - lora_indices[i] = lora_idx - if long_lora_context: - assert long_lora_offsets is not None - lora_offset: int = long_lora_context.offsets_by_lora_id.get( - index_mapping_indices[i], 0) - long_lora_offsets[i] = lora_offset - - indices_list: List[Union[List[int], torch.Tensor]] = [ - index_mapping_indices, - lora_indices, - embedding_indices, - ] - if long_lora_context: - assert long_lora_offsets is not None - indices_list.append(long_lora_offsets) - indices = torch.tensor(indices_list, dtype=torch.long, device=device) - prompt_mapping_tensor = torch.tensor(prompt_mapping, - dtype=torch.long, - device=device) - embeddings_indices = torch.stack([ - indices[2] * extra_vocab_size, - indices[2] * (vocab_size + extra_vocab_size), - ]) - embeddings_indices[embeddings_indices == -1] = max_loras - 1 - base_indices = indices[1] - sampler_indices = prompt_mapping_tensor - sampler_indices_padded = sampler_indices.clone() - sampler_indices_padded[sampler_indices_padded == -1] = max_loras - 1 - sampler_indices_padded = torch.arange( - 0, len(sampler_indices_padded), device=device, dtype=torch.long) + ( - sampler_indices_padded * len(sampler_indices_padded)) - long_lora_indices = None - long_lora_indices_len: Optional[int] = None - if long_lora_context: - long_lora_indices = indices[3] - long_lora_indices_len = long_lora_indices.shape[-1] - # Contain length of indices tensors. Used to index into each tensor. - indices_len = [ - base_indices.shape[-1], - sampler_indices.shape[-1], - sampler_indices_padded.shape[-1], - embeddings_indices.shape[-1], - ] - if long_lora_indices_len is not None: - indices_len.append(long_lora_indices_len) - else: - # If long_lora doesn't exist,append None - indices_len.append(None) - - return ( - base_indices, - sampler_indices, - sampler_indices_padded, - embeddings_indices, - long_lora_indices, - indices_len, - ) diff --git a/vllm/model_executor/guided_decoding/__init__.py b/vllm/model_executor/guided_decoding/__init__.py index e631aec928ec5..a81377341e095 100644 --- a/vllm/model_executor/guided_decoding/__init__.py +++ b/vllm/model_executor/guided_decoding/__init__.py @@ -73,6 +73,14 @@ def maybe_backend_fallback( "Falling back to use outlines instead.") guided_params.backend = "outlines" + # xgrammar only supports EBNF grammars and uses the GBNF format + # https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md + elif (guided_params.grammar is not None + and "::=" not in guided_params.grammar): + logger.warning("xgrammar only supports EBNF grammars. 
" + "Falling back to use outlines instead.") + guided_params.backend = "outlines" + # xgrammar doesn't support some JSON schema features elif (guided_params.json is not None and has_xgrammar_unsupported_json_features(guided_params.json)): diff --git a/vllm/model_executor/guided_decoding/xgrammar_decoding.py b/vllm/model_executor/guided_decoding/xgrammar_decoding.py index 80e88dd5b4b37..8287cd6cf3aa0 100644 --- a/vllm/model_executor/guided_decoding/xgrammar_decoding.py +++ b/vllm/model_executor/guided_decoding/xgrammar_decoding.py @@ -14,9 +14,6 @@ except ImportError: pass -from vllm.model_executor.guided_decoding.xgrammar_utils import ( - convert_lark_to_gbnf, grammar_is_likely_lark) - if TYPE_CHECKING: from transformers import PreTrainedTokenizer @@ -148,27 +145,15 @@ def from_guided_params(cls, else: json_str = guided_params.json return cls(json_str=json_str, - vocab_size=model_config.hf_text_config.vocab_size, + vocab_size=model_config.hf_config.vocab_size, encoded_vocab=encoded_vocab, stop_token_ids=stop_token_ids, backend_str=backend_str, tokenizer_hash=tokenizer_hash, max_threads=max_threads) elif guided_params.grammar: - # XGrammar only supports GBNF grammars, so we must convert Lark - if grammar_is_likely_lark(guided_params.grammar): - try: - grammar_str = convert_lark_to_gbnf(guided_params.grammar) - except ValueError as e: - raise ValueError( - "Failed to convert the grammar from Lark to GBNF. " - "Please either use GBNF grammar directly or specify" - " --guided-decoding-backend=outlines.\n" - f"Conversion error: {str(e)}") from e - else: - grammar_str = guided_params.grammar - return cls(grammar_str=grammar_str, - vocab_size=model_config.hf_text_config.vocab_size, + return cls(grammar_str=guided_params.grammar, + vocab_size=model_config.hf_config.vocab_size, encoded_vocab=encoded_vocab, stop_token_ids=stop_token_ids, backend_str=backend_str, @@ -176,7 +161,7 @@ def from_guided_params(cls, max_threads=max_threads) elif guided_params.json_object: return cls(json_object=True, - vocab_size=model_config.hf_text_config.vocab_size, + vocab_size=model_config.hf_config.vocab_size, encoded_vocab=encoded_vocab, stop_token_ids=stop_token_ids, backend_str=backend_str, diff --git a/vllm/model_executor/guided_decoding/xgrammar_utils.py b/vllm/model_executor/guided_decoding/xgrammar_utils.py deleted file mode 100644 index 12b42245f4e3d..0000000000000 --- a/vllm/model_executor/guided_decoding/xgrammar_utils.py +++ /dev/null @@ -1,162 +0,0 @@ -import re - - -def grammar_is_likely_lark(grammar_str: str) -> bool: - """ - Check if grammar appears to use Lark syntax. - - Args: - grammar_str: Input grammar string - - Returns: - bool: True if grammar appears to be in Lark format, False otherwise - - Examples: - >>> grammar_is_likely_lark("rule: 'abc'") - True - >>> grammar_is_likely_lark("rule ::= 'abc'") - False - """ - if not grammar_str or not isinstance(grammar_str, str): - return False - - for line in grammar_str.split('\n'): - # Remove both comment styles - line = re.sub(r'(#|//).*$', '', line).strip() - if not line: - continue - - # Look for Lark-style rule definitions - if ':' in line and '::=' not in line: - return True - - # Look for Lark-specific features - if any(pattern in line for pattern in ['?start:', '|', '~']): - return True - - return False - - -def convert_lark_to_gbnf(grammar_str: str) -> str: - """ - Convert a Lark grammar string to GBNF format. 
- - GBNF reference: - https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md - Lark grammar reference: - https://lark-parser.readthedocs.io/en/latest/grammar.html - - Args: - grammar_str: Input grammar in Lark format - - Returns: - str: Converted grammar in GBNF format - - Examples: - >>> print(convert_lark_to_gbnf("rule: 'hello'")) - root ::= rule - rule ::= "hello" - """ - if not isinstance(grammar_str, str): - raise ValueError(f"Grammar must be a string, got {type(grammar_str)}") - if not grammar_str.strip(): - raise ValueError("Grammar string cannot be empty") - - defined_rules = set() - referenced_rules = set() - output_lines = [] - - def clean_line(line: str) -> str: - """Remove comments and whitespace from line.""" - return re.sub(r'(#|//).*$', '', line).strip() - - def check_quotes(text: str, rule_name: str, line_num: int) -> None: - """Validate quote matching in text.""" - if text.count("'") % 2 != 0 or text.count('"') % 2 != 0: - raise ValueError( - f"Mismatched quotes in {rule_name} on line {line_num}") - - def extract_references(text: str) -> set: - """Extract rule references from text.""" - # Remove quoted strings and special characters - text = re.sub(r'"[^"]*"', '', text) - text = re.sub(r'[+*?()|\[\]{}]', ' ', text) - return set(re.findall(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b', text)) - - # First pass: Find root rule and validate rule definitions - lines = [clean_line(line) for line in grammar_str.split('\n')] - first_rule = None - - for line_num, line in enumerate(lines, 1): - if not line or line.startswith('|'): - continue - - if ':' in line: - try: - name = line.split(':', 1)[0].strip().strip('?') - defined_rules.add(name) - if first_rule is None: - first_rule = name - if name == 'start': - first_rule = 'start' - except IndexError as e: - raise ValueError(f"Invalid rule format on line {line_num}. 
" - "Expected 'rule_name: definition'") from e - - if not defined_rules: - raise ValueError("No valid rules found in grammar") - - # Add root rule - output_lines.append(f"root ::= {first_rule}") - - # Second pass: Process rule definitions and alternatives - current_rule = None - current_definition = [] - - for line_num, line in enumerate(lines, 1): - if not line: - continue - - try: - if ':' in line and not line.startswith('|'): - # Save previous rule if exists - if current_rule: - output_lines.append( - f"{current_rule} ::= {' | '.join(current_definition)}") - - # Process new rule - name, definition = line.split(':', 1) - current_rule = name.strip().strip('?') - - check_quotes(definition, f"rule '{current_rule}'", line_num) - definition = re.sub(r"'([^']*)'", r'"\1"', definition) - referenced_rules.update(extract_references(definition)) - current_definition = [definition.strip()] - - elif line.startswith('|'): - if not current_rule: - raise ValueError(f"Alternative '|' on line {line_num} " - "without a preceding rule definition") - - alt_def = line[1:].strip() - check_quotes(alt_def, f"alternative for rule '{current_rule}'", - line_num) - alt_def = re.sub(r"'([^']*)'", r'"\1"', alt_def) - referenced_rules.update(extract_references(alt_def)) - current_definition.append(alt_def) - - except ValueError as e: - raise ValueError(f"Error on line {line_num}: {str(e)}") from e - - # Add final rule if exists - if current_rule: - output_lines.append( - f"{current_rule} ::= {' | '.join(current_definition)}") - - # Validate all rules are defined - undefined_rules = referenced_rules - defined_rules - {'root'} - if undefined_rules: - raise ValueError("Referenced rules are not defined: " - f"{', '.join(sorted(undefined_rules))}") - - return '\n'.join(output_lines) diff --git a/vllm/model_executor/layers/layernorm.py b/vllm/model_executor/layers/layernorm.py index 43ea4eb5a4d1a..345919c5d1636 100644 --- a/vllm/model_executor/layers/layernorm.py +++ b/vllm/model_executor/layers/layernorm.py @@ -20,7 +20,6 @@ def __init__( hidden_size: int, eps: float = 1e-6, var_hidden_size: Optional[int] = None, - has_weight: bool = True, ) -> None: super().__init__() @@ -28,11 +27,7 @@ def __init__( self.variance_epsilon = eps self.variance_size_override = (None if var_hidden_size == hidden_size else var_hidden_size) - self.has_weight = has_weight - - self.weight = torch.ones(hidden_size) - if self.has_weight: - self.weight = nn.Parameter(self.weight) + self.weight = nn.Parameter(torch.ones(hidden_size)) def forward_native( self, @@ -64,9 +59,7 @@ def forward_native( variance = x_var.pow(2).mean(dim=-1, keepdim=True) x = x * torch.rsqrt(variance + self.variance_epsilon) - x = x.to(orig_dtype) - if self.has_weight: - x = x * self.weight + x = x.to(orig_dtype) * self.weight if residual is None: return x else: diff --git a/vllm/model_executor/layers/logits_processor.py b/vllm/model_executor/layers/logits_processor.py index 1d9dad02976c6..85c6847545860 100644 --- a/vllm/model_executor/layers/logits_processor.py +++ b/vllm/model_executor/layers/logits_processor.py @@ -5,7 +5,6 @@ import torch import torch.nn as nn -import vllm.envs as envs from vllm.distributed import (tensor_model_parallel_all_gather, tensor_model_parallel_gather) from vllm.model_executor.layers.vocab_parallel_embedding import ( @@ -43,9 +42,7 @@ def __init__(self, # Soft cap the logits. Used in Gemma 2. self.soft_cap = soft_cap # Whether to use gather or all-gather to gather the logits. 
- - self.use_gather = not current_platform.is_tpu( - ) and not envs.VLLM_USE_V1 + self.use_gather = not current_platform.is_tpu() def forward( self, diff --git a/vllm/model_executor/layers/mamba/mamba_mixer.py b/vllm/model_executor/layers/mamba/mamba_mixer.py index 10bec75f49fdf..8ef0a6cdf2c52 100644 --- a/vllm/model_executor/layers/mamba/mamba_mixer.py +++ b/vllm/model_executor/layers/mamba/mamba_mixer.py @@ -40,7 +40,6 @@ def __init__(self, use_conv_bias: bool, use_bias: bool, use_rms_norm: bool, - rms_norm_has_weight: bool = True, rms_norm_eps: float = 1e-5, activation="silu"): super().__init__() @@ -106,23 +105,14 @@ def A_weight_loader(param: Parameter, loaded_weight: torch.Tensor): input_is_parallel=True, ) - self.dt_layernorm = RMSNorm( - time_step_rank, - eps=rms_norm_eps, - has_weight=rms_norm_has_weight, - ) if use_rms_norm else None - - self.b_layernorm = RMSNorm( - ssm_state_size, - eps=rms_norm_eps, - has_weight=rms_norm_has_weight, - ) if use_rms_norm else None - - self.c_layernorm = RMSNorm( - ssm_state_size, - eps=rms_norm_eps, - has_weight=rms_norm_has_weight, - ) if use_rms_norm else None + self.dt_layernorm = RMSNorm(time_step_rank, + eps=rms_norm_eps) if use_rms_norm else None + + self.b_layernorm = RMSNorm(ssm_state_size, + eps=rms_norm_eps) if use_rms_norm else None + + self.c_layernorm = RMSNorm(ssm_state_size, + eps=rms_norm_eps) if use_rms_norm else None def forward_native(self, hidden_states: torch.Tensor, attn_metadata: AttentionMetadata, diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index b9866738d03e9..96353e0274654 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -101,10 +101,12 @@ def _initialize_model( vllm_config: VllmConfig, *, prefix: str = "", + architectures: Optional[list[str]] = None, ) -> nn.Module: """Initialize a model with the given configurations.""" model_config = vllm_config.model_config - model_class, _ = get_model_architecture(model_config) + model_class, _ = get_model_architecture(model_config, + architectures=architectures) signatures = inspect.signature(model_class.__init__) all_params = [param.name for param in signatures.parameters.values()] diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 65e7abf866076..5851267475aef 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -1,6 +1,6 @@ """Utilities for selecting and loading models.""" import contextlib -from typing import Tuple, Type +from typing import Optional, Tuple, Type import torch from torch import nn @@ -20,8 +20,12 @@ def set_default_torch_dtype(dtype: torch.dtype): def get_model_architecture( - model_config: ModelConfig) -> Tuple[Type[nn.Module], str]: - architectures = getattr(model_config.hf_config, "architectures", []) + model_config: ModelConfig, + *, + architectures: Optional[list[str]] = None, +) -> Tuple[Type[nn.Module], str]: + if architectures is None: + architectures = getattr(model_config.hf_config, "architectures", []) # Special handling for quantized Mixtral. # FIXME(woosuk): This is a temporary hack. 
diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py index 0398f0943a70a..5ca26d53a17e7 100644 --- a/vllm/model_executor/models/exaone.py +++ b/vllm/model_executor/models/exaone.py @@ -473,11 +473,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) + self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() - self.sampler = get_sampler() - self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py index f9e0443b9a508..bd2394e71c973 100644 --- a/vllm/model_executor/models/granite.py +++ b/vllm/model_executor/models/granite.py @@ -400,17 +400,16 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.lm_head.weight = self.model.embed_tokens.weight logit_scale = getattr(config, "logit_scale", 1.0) + if hasattr(config, "logits_scaling"): logit_scale /= config.logits_scaling - self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, scale=logit_scale) + self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() - self.sampler = get_sampler() - def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.model.get_input_embeddings(input_ids) diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index c3979eab905db..01a381381ccec 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -36,11 +36,6 @@ def get_multimodal_embeddings(self, **kwargs) -> Optional[T]: """ Returns multimodal embeddings generated from multimodal kwargs to be merged with text embeddings. - - The output embeddings must be one of the following formats: - - A list or tuple of 2D tensors, where each tensor corresponds to - each input image. - - A single 3D tensor, with the batch dimension grouping the 2D tensors. """ ... diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 42c769f79e202..d5a7781fecfc3 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -26,7 +26,7 @@ InternVisionPatchModel) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs -from vllm.multimodal.inputs import NestedTensors, PlaceholderRange +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import cached_get_tokenizer from vllm.sequence import IntermediateTensors from vllm.utils import is_list_of @@ -52,18 +52,12 @@ class InternVLImagePixelInputs(TypedDict): Shape: `(batch_size * num_images * (1 + num_patches), num_channels, height, width)` """ - patches_per_image: List[int] - """ - List of number of total patches for each image in the batch. - """ class InternVLImageEmbeddingInputs(TypedDict): type: Literal["image_embeds"] - data: NestedTensors - """ - A tensor of shape `(num_images, total_image_feature_size, hidden_size)` - or a list of tensors of shape `(total_image_feature_size, hidden_size)` + data: torch.Tensor + """Shape: `(batch_size * num_images, image_feature_size, hidden_size)` `hidden_size` must match the hidden size of language model backbone. 
""" @@ -355,32 +349,10 @@ def input_processor( new_prompt = self._expand_image_prompt(prompt, image_feature_sizes, num_patches) new_prompt_token_ids = tokenizer.encode(new_prompt) - img_context_token_id = tokenizer.encode(self.img_context_token, - add_special_tokens=False) - assert len(img_context_token_id) == 1, \ - (f"Invalid image token '{self.img_context_token}': A valid image " - f"token encodes to a single token ID, got {img_context_token_id}.") - img_context_token_id = img_context_token_id[0] - - # Get precise tracking of placeholder positions - token_idx = image_idx = 0 - placeholder_ranges = [] - while token_idx < len(new_prompt_token_ids): - if new_prompt_token_ids[token_idx] == img_context_token_id: - curr_image_featue_size = image_feature_sizes[image_idx] - placeholder_ranges.append( - PlaceholderRange(offset=token_idx, - length=curr_image_featue_size)) - image_idx += 1 - token_idx += curr_image_featue_size - else: - token_idx += 1 - return token_inputs( - prompt=prompt, - prompt_token_ids=new_prompt_token_ids, - multi_modal_data=multi_modal_data, - multi_modal_placeholders={"image": placeholder_ranges}) + return token_inputs(prompt=prompt, + prompt_token_ids=new_prompt_token_ids, + multi_modal_data=multi_modal_data) def input_mapper( self, @@ -642,46 +614,26 @@ def _parse_and_validate_image_input( if not isinstance(pixel_values, (torch.Tensor, list)): raise ValueError("Incorrect type of pixel values. " f"Got type: {type(pixel_values)}") - - patches_per_image = [] - for request_pixel_values in pixel_values: - for image_pixel_values in request_pixel_values: - patches_per_image.append(image_pixel_values.shape[0]) # We need to flatten (B, N, P) to (B*N*P), # so we call flatten_bn twice. return InternVLImagePixelInputs( type="pixel_values", data=self._validate_pixel_values( flatten_bn(flatten_bn(pixel_values), concat=True)), - patches_per_image=patches_per_image) + ) raise AssertionError("This line should be unreachable.") def _process_image_input( self, image_input: InternVLImageInputs, - ) -> Tuple[torch.Tensor]: + ) -> torch.Tensor: if image_input["type"] == "image_embeds": return image_input["data"] assert self.vision_model is not None - image_embeds = self.extract_feature(image_input["data"]) - patches_per_image = image_input["patches_per_image"] - if len(patches_per_image) == 1: - image_embeds = image_embeds.unsqueeze(0) - return image_embeds - - # NOTE: Image embeddings are split into separate tensors for each image - # by the size of each embedding. 
- feature_size = image_embeds.shape[1] - image_embeds = image_embeds.view(-1, - self.config.text_config.hidden_size) - image_feature_sizes = [ - num_patches * feature_size for num_patches in patches_per_image - ] - image_embeds = image_embeds.split(image_feature_sizes) return image_embeds def _set_visual_token_mask(self, input_ids: torch.Tensor) -> torch.Tensor: @@ -744,11 +696,13 @@ def forward( "inputs_embeds": inputs_embeds, } - # Only required if the model is mono-architecture if self.visual_token_mask is not None: + # overwrite visual_token_mask and img_context_token_id back to None, + # so that this doesn't need to depend on encoder output forward_kwargs.update( {"visual_token_mask": self.visual_token_mask}) self.visual_token_mask = None + self.img_context_token_id = None hidden_states = self.language_model.model(**forward_kwargs) return hidden_states diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index 6461a80cef331..baf30690c0b14 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -548,11 +548,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) + self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() - self.sampler = get_sampler() - self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index 65c6bd07bfff0..d375c1c9da2a9 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -1,19 +1,17 @@ from functools import cached_property -from types import MethodType from typing import (Iterable, List, Literal, Mapping, Optional, Protocol, Set, Tuple, TypedDict, Union) import torch import torch.nn as nn -from PIL.Image import Image -from transformers import (BatchFeature, CLIPVisionConfig, LlavaConfig, - PixtralVisionConfig, PretrainedConfig, - ProcessorMixin, SiglipVisionConfig) -from transformers.models.pixtral import PixtralProcessor +from PIL import Image +from transformers import (CLIPVisionConfig, LlavaConfig, PixtralVisionConfig, + PretrainedConfig, SiglipVisionConfig) from vllm.attention import AttentionMetadata from vllm.config import VllmConfig -from vllm.inputs import InputContext +from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, + InputContext) from vllm.model_executor.layers.activation import get_act_fn from vllm.model_executor.layers.linear import (ColumnParallelLinear, RowParallelLinear) @@ -21,21 +19,21 @@ from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY -from vllm.multimodal.inputs import MultiModalKwargs, NestedTensors -from vllm.multimodal.processing import (BaseMultiModalProcessor, - InputProcessingContext, - ModalityProcessingMetadata, - MultiModalProcessingMetadata, - PromptReplacement) +from vllm.multimodal.inputs import NestedTensors from vllm.sequence import IntermediateTensors +from vllm.utils import is_list_of from .clip import (CLIPVisionModel, dummy_image_for_clip, - get_max_clip_image_tokens) + dummy_seq_data_for_clip, get_max_clip_image_tokens, + input_processor_for_clip) from .interfaces import SupportsMultiModal, SupportsPP from .pixtral import (PixtralHFVisionModel, dummy_image_for_pixtral_hf, - get_max_pixtral_hf_image_tokens) + 
dummy_seq_data_for_pixtral_hf, + get_max_pixtral_hf_image_tokens, + input_processor_for_pixtral_hf) from .siglip import (SiglipVisionModel, dummy_image_for_siglip, - get_max_siglip_image_tokens) + dummy_seq_data_for_siglip, get_max_siglip_image_tokens, + input_processor_for_siglip) from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model, maybe_prefix, merge_multimodal_embeddings) @@ -115,116 +113,103 @@ def get_max_llava_image_tokens(ctx: InputContext): raise ValueError(f"Unexpected select feature strategy: {strategy}") -def dummy_mm_kwargs_for_llava(ctx: InputProcessingContext, - mm_counts: Mapping[str, int]): +def dummy_data_for_llava(ctx: InputContext, seq_len: int, + mm_counts: Mapping[str, int]): hf_config = ctx.get_hf_config(LlavaConfig) vision_config = hf_config.vision_config num_images = mm_counts["image"] - if isinstance(vision_config, CLIPVisionConfig): - data = dummy_image_for_clip(vision_config, num_images) - elif isinstance(vision_config, SiglipVisionConfig): - data = dummy_image_for_siglip(vision_config, num_images) - elif isinstance(vision_config, PixtralVisionConfig): - data = dummy_image_for_pixtral_hf(vision_config, num_images) - else: - msg = f"Unsupported vision config: {type(vision_config)}" - raise NotImplementedError(msg) - - hf_processor = ctx.get_hf_processor() - image_processor = hf_processor.image_processor # type: ignore - hf_inputs = image_processor.preprocess(data['image'], return_tensors="pt") - is_pixtral = isinstance(hf_processor, PixtralProcessor) - - return MultiModalKwargs( - **hf_inputs, - is_pixtral=torch.tensor(is_pixtral), - ) - - -def create_metadata_for_llava( - ctx: InputProcessingContext) -> MultiModalProcessingMetadata: - hf_config = ctx.get_hf_config(LlavaConfig) - image_token_id = hf_config.image_token_index - - def get_repl_count( - mm_items: list[Image], - hf_inputs: BatchFeature, - item_idx: int, - ) -> int: - return get_max_llava_image_tokens(ctx) - - return { - "image": - ModalityProcessingMetadata(prompt_repls=[ - PromptReplacement(target=[image_token_id], - repl_unit=[image_token_id], - repl_count=get_repl_count), - ]), - } + image_feature_size = get_max_llava_image_tokens(ctx) - -class LlavaProcessor(BaseMultiModalProcessor): - - def __init__(self, ctx: InputProcessingContext) -> None: - super().__init__( - ctx=ctx, - metadata=create_metadata_for_llava(ctx), + if isinstance(vision_config, CLIPVisionConfig): + seq_data, ranges = dummy_seq_data_for_clip( + vision_config, + seq_len, + num_images, + image_token_id=hf_config.image_token_index, + image_feature_size_override=image_feature_size, ) - def _patch_pixtral_processor(self, hf_processor: PixtralProcessor): - if getattr(hf_processor, "__is_patched__", False): - return # Already patched + mm_data = dummy_image_for_clip(vision_config, num_images) + return DummyData(seq_data, mm_data, ranges) + elif isinstance(vision_config, SiglipVisionConfig): + seq_data, ranges = dummy_seq_data_for_siglip( + vision_config, + seq_len, + num_images, + image_token_id=hf_config.image_token_index, + image_feature_size_override=image_feature_size, + ) - image_processor = hf_processor.image_processor # type: ignore - orig_preprocess = image_processor.preprocess + mm_data = dummy_image_for_siglip(vision_config, num_images) + return DummyData(seq_data, mm_data, ranges) + elif isinstance(vision_config, PixtralVisionConfig): + seq_data, ranges = dummy_seq_data_for_pixtral_hf( + vision_config, + seq_len, + num_images, + image_token_id=hf_config.image_token_index, + 
image_feature_size_override=image_feature_size, + ) - def preprocess(__self, *args, **kwargs): - hf_inputs = orig_preprocess(*args, **kwargs) - hf_inputs["is_pixtral"] = torch.tensor(True) - return hf_inputs + mm_data = dummy_image_for_pixtral_hf(vision_config, num_images) + return DummyData(seq_data, mm_data, ranges) - image_processor.preprocess = MethodType(preprocess, image_processor) + msg = f"Unsupported vision config: {type(vision_config)}" + raise NotImplementedError(msg) - hf_processor.__is_patched__ = True # type: ignore - def _get_hf_processor(self) -> ProcessorMixin: - hf_processor = self.ctx.get_hf_processor() +def input_processor_for_llava(ctx: InputContext, inputs: DecoderOnlyInputs): + multi_modal_data = inputs.get("multi_modal_data") + if multi_modal_data is None or "image" not in multi_modal_data: + return inputs - if isinstance(hf_processor, PixtralProcessor): - self._patch_pixtral_processor(hf_processor) + model_config = ctx.model_config + hf_config = ctx.get_hf_config(LlavaConfig) + vision_config = hf_config.vision_config - return hf_processor + image_data = multi_modal_data["image"] + if isinstance(image_data, Image.Image): + image_feature_size = get_max_llava_image_tokens(ctx) + elif is_list_of(image_data, Image.Image): + image_feature_size = [get_max_llava_image_tokens(ctx) + ] * len(image_data) + elif isinstance(image_data, torch.Tensor): + num_images, image_feature_size, hidden_size = image_data.shape + elif is_list_of(image_data, torch.Tensor): + image_feature_size = [item.shape[1] for item in image_data] + else: + raise TypeError(f"Invalid image type: {type(image_data)}") - def _get_dummy_mm_kwargs( - self, - mm_counts: Mapping[str, int], - ) -> MultiModalKwargs: - hf_config = self.ctx.get_hf_config(LlavaConfig) - vision_config = hf_config.vision_config - num_images = mm_counts["image"] - - if isinstance(vision_config, CLIPVisionConfig): - data = dummy_image_for_clip(vision_config, num_images) - elif isinstance(vision_config, SiglipVisionConfig): - data = dummy_image_for_siglip(vision_config, num_images) - elif isinstance(vision_config, PixtralVisionConfig): - data = dummy_image_for_pixtral_hf(vision_config, num_images) - else: - msg = f"Unsupported vision config: {type(vision_config)}" - raise NotImplementedError(msg) - - hf_processor = self._get_hf_processor() - image_processor = hf_processor.image_processor # type: ignore - hf_inputs = image_processor.preprocess(data['image'], - return_tensors="pt") - is_pixtral = isinstance(hf_processor, PixtralProcessor) - - return MultiModalKwargs( - **hf_inputs, - is_pixtral=torch.tensor(is_pixtral), + if isinstance(vision_config, CLIPVisionConfig): + return input_processor_for_clip( + model_config, + vision_config, + inputs, + image_token_id=hf_config.image_token_index, + image_feature_size_override=image_feature_size, + ) + elif isinstance(vision_config, SiglipVisionConfig): + return input_processor_for_siglip( + model_config, + vision_config, + inputs, + image_token_id=hf_config.image_token_index, + image_feature_size_override=image_feature_size, + ) + elif isinstance(vision_config, PixtralVisionConfig): + # We ignore image_feature_size_override since we have non-uniform + # image sizes for Pixtral + return input_processor_for_pixtral_hf( + model_config, + vision_config, + inputs, + image_token_id=hf_config.image_token_index, ) + msg = f"Unsupported vision config: {type(vision_config)}" + raise NotImplementedError(msg) + class LlavaLikeConfig(Protocol): vision_config: PretrainedConfig @@ -306,8 +291,10 @@ def 
init_vision_tower_for_llava( raise NotImplementedError(msg) +@MULTIMODAL_REGISTRY.register_image_input_mapper() @MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_image_tokens) -@MULTIMODAL_REGISTRY.register_processor(LlavaProcessor) +@INPUT_REGISTRY.register_dummy_data(dummy_data_for_llava) +@INPUT_REGISTRY.register_input_processor(input_processor_for_llava) class LlavaForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): # BitandBytes specific attributes bitsandbytes_stacked_params_mapping = { @@ -380,10 +367,38 @@ def _validate_pixel_values(self, data: torch.Tensor) -> torch.Tensor: return data + def _validate_image_sizes(self, images: List[torch.Tensor], + sizes: List[torch.Tensor]) -> List[torch.Tensor]: + if not isinstance(sizes, list): + sizes = [sizes] + + total_images = sum(size.numel() // 2 for size in sizes) + if total_images != len(images): + raise ValueError("Mismatch in number of images. " + f"Expected {total_images}, got {len(images)}") + img_idx = 0 + for size in sizes: + # Flatten the size tensor to a list of (height, width) pairs + size = size.view(-1, 2).tolist() + for expected_h, expected_w in size: + if img_idx >= len(images): + raise ValueError("Ran out of images before sizes. " + f"{img_idx} >= {len(images)}") + img = images[img_idx] + if img.shape[-2:] != (expected_h, expected_w): + raise ValueError( + "Image size mismatch. Expected " + f"{(expected_h, expected_w)}, got {img.shape[-2:]}") + if img.shape[-3] != 3: + raise ValueError("Image channel mismatch. Expected 3, " + f"got {img.shape[-3]}") + img_idx += 1 + return images + def _parse_and_validate_image_input( self, **kwargs: object) -> Optional[LlavaImageInputs]: pixel_values = kwargs.pop("pixel_values", None) - is_pixtral = kwargs.pop("is_pixtral", torch.tensor([False])) + image_sizes = kwargs.pop("image_sizes", None) image_embeds = kwargs.pop("image_embeds", None) if pixel_values is None and image_embeds is None: @@ -394,8 +409,9 @@ def _parse_and_validate_image_input( raise ValueError("Incorrect type of pixel values. 
" f"Got type: {type(pixel_values)}") - assert isinstance(is_pixtral, torch.Tensor) - if is_pixtral.any(): + # Case for models like PixtralHF that have dynamic image sizes + # so we need to produce a list of tensors + if image_sizes is not None: images = pixel_values def flatten_to_3d_tensors(item): @@ -418,7 +434,7 @@ def flatten_to_3d_tensors(item): return LlavaImagePixelInputs( type="pixel_values", - data=images, + data=self._validate_image_sizes(images, image_sizes), ) return LlavaImagePixelInputs( @@ -586,28 +602,3 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) return loader.load_weights(weights) - - -class MantisProcessor(LlavaProcessor): - - def _get_hf_processor(self) -> ProcessorMixin: - try: - from mantis.models.mllava import MLlavaProcessor - except ModuleNotFoundError as exc: - raise ModuleNotFoundError( - "You need to `pip install " - "git+https://github.com/TIGER-AI-Lab/Mantis.git` " - "to use this model") from exc - - processor = MLlavaProcessor.from_pretrained( - self.ctx.model_config.tokenizer) - assert isinstance(processor, ProcessorMixin) - return processor - - -# To use this model, please use -# `--hf_overrides '{"architectures": ["MantisForConditionalGeneration"]}'` -@MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_image_tokens) -@MULTIMODAL_REGISTRY.register_processor(MantisProcessor) -class MantisForConditionalGeneration(LlavaForConditionalGeneration): - pass diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index 8bdcd2c5aad1f..b32032e411b0a 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -1,5 +1,5 @@ """PyTorch MAMBA model.""" -from typing import Iterable, List, Optional, Set, Tuple +from typing import Iterable, List, Optional, Tuple import torch from torch import nn @@ -47,7 +47,6 @@ def __init__(self, use_conv_bias=config.use_conv_bias, use_bias=config.use_bias, use_rms_norm=self.is_falcon_mamba, - rms_norm_has_weight=not self.is_falcon_mamba, rms_norm_eps=mixer_rms_eps, activation=config.hidden_act) @@ -242,10 +241,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, - torch.Tensor]]) -> Set[str]: + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): params_dict = dict(self.named_parameters()) - loaded_params: Set[str] = set() for name, loaded_weight in weights: if "A_log" in name: name = name.replace("A_log", "A") @@ -257,5 +254,3 @@ def load_weights(self, weights: Iterable[Tuple[str, weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index a328b5a2aeea7..d1fcbd167c199 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -37,7 +37,7 @@ ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs -from vllm.multimodal.inputs import NestedTensors, PlaceholderRange +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import cached_get_tokenizer from vllm.sequence import (VLLM_TOKEN_ID_ARRAY_TYPE, IntermediateTensors, SequenceData) @@ -46,16 +46,12 @@ from .interfaces import SupportsMultiModal, SupportsPP from .utils import 
(AutoWeightsLoader, WeightsMapper, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, - maybe_prefix, merge_multimodal_embeddings) + maybe_prefix) # TODO: hard-coded for now. Consider making it configurable. VIT_LAYERS = [-2, -9] NUM_PREFIX_TOKENS = 1 ADDITIONAL_VOCAB_SIZE = 128 -DEFAULT_IMAGE_PATCH_TOKEN_ID = 152066 -DEFAULT_IM_START_TOKEN_ID = 152067 -DEFAULT_IM_END_TOKEN_ID = 152064 -DEFAULT_IM_COL_TOKEN_ID = 152065 class MolmoImageInputs(TypedDict): @@ -79,11 +75,6 @@ class MolmoImageInputs(TypedDict): `(batch_size, num_crops, num_patch)` """ - image_start_end: Tuple[int, int] - """Starting and ending index of placeholder - tokens - """ - @dataclass class VisionBackboneConfig: @@ -927,8 +918,6 @@ def image_input_mapper_for_molmo( ctx: InputContext, data: object, ): - if isinstance(data, list): - data = data[0] return MultiModalKwargs(data) @@ -978,22 +967,7 @@ def dummy_data_for_molmo(ctx: InputContext, seq_len: int, if "image_masks" in out: dummy_imgdata["image_masks"] = out["image_masks"] dummy_imgdata["seq_len"] = torch.tensor(seq_len, dtype=torch.long) - size = 0 - offset = -1 - for i in range(len(token_ids)): - if token_ids[i] in (DEFAULT_IMAGE_PATCH_TOKEN_ID, - DEFAULT_IM_START_TOKEN_ID, DEFAULT_IM_END_TOKEN_ID, - DEFAULT_IM_COL_TOKEN_ID): - if offset < 0: - offset = i - size += 1 - dummy_imgdata["image_start_end"] = (offset, offset + size) - return DummyData(seq_data=dummy_seqdata, - multi_modal_data={"image": dummy_imgdata}, - multi_modal_placeholders={ - "image": - [PlaceholderRange(offset=offset, length=size)] - }) + return DummyData(dummy_seqdata, {"image": dummy_imgdata}) def pad_images( @@ -1081,34 +1055,19 @@ def input_processor_for_molmo(ctx: InputContext, inputs: DecoderOnlyInputs): if image_masks is not None: image_data["image_masks"] = image_masks - new_prompt_token_ids = out["input_ids"].tolist() - image_data["seq_len"] = torch.tensor(len(new_prompt_token_ids), + image_data["seq_len"] = torch.tensor(len(out["input_ids"]), dtype=torch.long) multi_modal_data = dict(image=image_data) - size = 0 - offset = -1 - for i in range(len(new_prompt_token_ids)): - if new_prompt_token_ids[i] in (DEFAULT_IMAGE_PATCH_TOKEN_ID, - DEFAULT_IM_START_TOKEN_ID, - DEFAULT_IM_END_TOKEN_ID, - DEFAULT_IM_COL_TOKEN_ID): - if offset < 0: - offset = i - size += 1 - image_data["image_start_end"] = (offset, offset + size) prompt = inputs.get("prompt") if prompt is None: - prompt = tokenizer.decode(new_prompt_token_ids) + prompt = tokenizer.decode(out["input_ids"]) return token_inputs( - prompt_token_ids=new_prompt_token_ids, + prompt_token_ids=out["input_ids"], prompt=prompt, multi_modal_data=multi_modal_data, - multi_modal_placeholders={ - "image": [PlaceholderRange(offset=offset, length=size)] - }, ) @@ -1154,7 +1113,6 @@ def _parse_and_validate_image_input( ) -> Optional[MolmoImageInputs]: images = kwargs.pop("images", None) image_masks = kwargs.pop("image_masks", None) - image_start_end = kwargs.pop("image_start_end", None) if images is None: return None @@ -1172,7 +1130,6 @@ def _parse_and_validate_image_input( image_input_idx=image_input_idx, seq_len=seq_len, image_masks=image_masks, - image_start_end=image_start_end, ) def _process_image_input( @@ -1221,16 +1178,9 @@ def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: # Note: In this original implementation from AI2, the final # vision_embeddings will be always be the same length - # of input embeddings. + # of input embedddings, which is not very efficient. 
+ # TODO(ywang96): see if this can be optimized. vision_embeddings = torch.einsum('nd,nm->md', image_features, mat) - - # Split by the sizes of the input sequences. For each full embedding, - # extract the actual vision embeddings to be merged. - vision_embeddings = list(vision_embeddings.split(seq_len.tolist())) - for i in range(len(vision_embeddings)): - start, end = image_input['image_start_end'][i] - vision_embeddings[i] = vision_embeddings[i][start:end] - return vision_embeddings def get_input_embeddings( @@ -1240,11 +1190,7 @@ def get_input_embeddings( ) -> torch.Tensor: inputs_embeds = self.model.get_input_embeddings(input_ids) if multimodal_embeddings is not None: - inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, multimodal_embeddings, [ - DEFAULT_IMAGE_PATCH_TOKEN_ID, DEFAULT_IM_START_TOKEN_ID, - DEFAULT_IM_END_TOKEN_ID, DEFAULT_IM_COL_TOKEN_ID - ]) + inputs_embeds = inputs_embeds + multimodal_embeddings return inputs_embeds def forward( diff --git a/vllm/model_executor/models/nemotron.py b/vllm/model_executor/models/nemotron.py index 34cb9981c167b..c7b4c22b6896b 100644 --- a/vllm/model_executor/models/nemotron.py +++ b/vllm/model_executor/models/nemotron.py @@ -435,11 +435,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) + self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() - - self.sampler = get_sampler() - self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index 3c7854ce388ab..eef23029a2aca 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -12,18 +12,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
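# NOTE (editor's illustrative sketch, not part of the patch): the Molmo hunk
# above goes back to adding a full-length multimodal tensor onto the text
# embeddings, whereas the removed code (and merge_multimodal_embeddings in the
# utils.py hunk later in this patch) scatter-merges image features only at
# placeholder positions. A minimal scatter-style merge, assuming `mm_embeds`
# holds exactly one row per placeholder token, in prompt order:
from typing import List

import torch


def scatter_merge(input_ids: torch.Tensor, inputs_embeds: torch.Tensor,
                  mm_embeds: torch.Tensor,
                  placeholder_ids: List[int]) -> torch.Tensor:
    mask = torch.isin(input_ids,
                      torch.tensor(placeholder_ids, device=input_ids.device))
    # Overwrite only the placeholder rows; text rows keep their embeddings.
    inputs_embeds[mask] = mm_embeds.to(dtype=inputs_embeds.dtype)
    return inputs_embeds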
-from functools import cached_property -from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, - TypedDict, Union) +import itertools +import re +from functools import cached_property, lru_cache +from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, Set, + Tuple, TypedDict, Union) +import numpy as np import torch import torch.nn as nn -from transformers import (BatchFeature, CLIPVisionConfig, PretrainedConfig, - ProcessorMixin) +from PIL import Image +from transformers import CLIPVisionConfig, PretrainedConfig from vllm.attention import AttentionMetadata -from vllm.config import VllmConfig -from vllm.inputs import InputContext +from vllm.config import ModelConfig, VllmConfig +from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, + InputContext, token_inputs) from vllm.logger import init_logger from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler @@ -32,18 +36,12 @@ from vllm.model_executor.models.clip import CLIPVisionModel from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY -from vllm.multimodal.image import cached_get_image_processor -from vllm.multimodal.inputs import MultiModalKwargs, NestedTensors -from vllm.multimodal.processing import (BaseMultiModalProcessor, - InputProcessingContext, - ModalityProcessingMetadata, - MultiModalDataDict, - MultiModalProcessingMetadata, - PromptReplacement) +from vllm.multimodal.inputs import NestedTensors, PlaceholderRange +from vllm.multimodal.utils import cached_get_tokenizer, repeat_and_pad_token from vllm.sequence import IntermediateTensors from vllm.utils import is_list_of -from .clip import dummy_image_for_clip +from .clip import dummy_image_for_clip, dummy_seq_data_for_clip from .interfaces import SupportsMultiModal, SupportsPP from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, init_vllm_registered_model, maybe_prefix, @@ -305,99 +303,231 @@ def add_image_newline(self, image_features_hd): return image_features_hd_newline +# Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L57 +def _calc_padded_size(*, width: int, height: int, padding_unit: int = 336): + target_height = int(np.ceil(height / padding_unit) * padding_unit) + top_padding = int((target_height - height) / 2) + bottom_padding = target_height - height - top_padding + padded_width = width + padded_height = height + top_padding + bottom_padding + return padded_width, padded_height + + +# Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L90 +def _calc_hd_transform_size(*, width: int, height: int, hd_num: int): + transposed = False + if width < height: + width, height = height, width + transposed = True + + ratio = width / height + scale = 1 + while scale * np.ceil(scale / ratio) <= hd_num: + scale += 1 + scale -= 1 + + new_width = int(scale * 336) + new_height = int(new_width / ratio) + + padded_width, padded_height = _calc_padded_size(width=new_width, + height=new_height) + + if transposed: + padded_width, padded_height = padded_height, padded_width + + return padded_width, padded_height + + +# Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L181 +def get_phi3v_image_feature_size( + hf_config: Dict[str, Any], + *, + input_height: int, + input_width: int, + num_crops: int, +) -> int: + if 
num_crops is None: + num_crops = hf_config.get("num_crops", 16) + new_width, new_height = _calc_hd_transform_size(width=input_width, + height=input_height, + hd_num=num_crops) + + return (new_height // 336 * new_width // 336 + 1) * 144 + 1 \ + + (new_height // 336 + 1) * 12 + + def get_max_phi3v_image_tokens(ctx: InputContext, *, num_crops: Optional[int] = None): - mm_processor_kwargs = {} - if num_crops is not None: - mm_processor_kwargs["num_crops"] = num_crops - model_config = ctx.model_config - image_processor = cached_get_image_processor( - model_config.model, - trust_remote_code=model_config.trust_remote_code, - **mm_processor_kwargs, - ) - - num_tokens = image_processor.calc_num_image_tokens_from_image_size( - width=MAX_IMAGE_FEATURE_SIZE_WIDTH, - height=MAX_IMAGE_FEATURE_SIZE_HEIGHT, + return get_phi3v_image_feature_size( + ctx.get_hf_image_processor_config(), + input_height=MAX_IMAGE_FEATURE_SIZE_HEIGHT, + input_width=MAX_IMAGE_FEATURE_SIZE_WIDTH, + num_crops=num_crops, ) - return num_tokens -def dummy_mm_kwargs_for_phi3v(ctx: InputProcessingContext, - mm_counts: Mapping[str, int]): +def dummy_data_for_phi3v(ctx: InputContext, + seq_len: int, + mm_counts: Mapping[str, int], + *, + num_crops: Optional[int] = None): num_images = mm_counts["image"] - data = dummy_image_for_clip( + image_feature_size = get_max_phi3v_image_tokens(ctx, num_crops=num_crops) + + seq_data, ranges = dummy_seq_data_for_clip( + CLIP_VIT_LARGE_PATCH14_336_CONFIG, + seq_len, + num_images, + image_token_id=_IMAGE_TOKEN_ID, + image_feature_size_override=image_feature_size, + ) + mm_data = dummy_image_for_clip( CLIP_VIT_LARGE_PATCH14_336_CONFIG, num_images, image_width_override=MAX_IMAGE_FEATURE_SIZE_WIDTH, image_height_override=MAX_IMAGE_FEATURE_SIZE_HEIGHT, ) - hf_processor = ctx.get_hf_processor() - image_processor = hf_processor.image_processor # type: ignore - hf_inputs = image_processor.preprocess(data['image'], return_tensors="pt") + return DummyData(seq_data, mm_data, ranges) - return MultiModalKwargs(**hf_inputs) +@lru_cache +def _get_image_placeholder_token_id_candidates( + model_config: ModelConfig, + idx: int, +) -> List[List[int]]: + assert idx > 0 -def create_metadata_for_phi3v( - ctx: InputProcessingContext) -> MultiModalProcessingMetadata: - return { - "image": - ModalityProcessingMetadata(prompt_repls=[ - PromptReplacement(target=[_IMAGE_TOKEN_ID], - repl_unit=[_IMAGE_TOKEN_ID], - repl_count=get_max_phi3v_image_tokens(ctx)), - ]), - } + tokenizer = cached_get_tokenizer(model_config.tokenizer) + # This is used when the image token is at the start of the string + start_candidate = tokenizer.encode(f"<|image_{idx}|>", + add_special_tokens=False) -class Phi3VProcessor(BaseMultiModalProcessor): + # This is used when the image token is in the middle of the string + # We need to get the token for "<", not "▁<" + # https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/raw/main/tokenizer.json + a_token_id, = tokenizer.encode("a", add_special_tokens=False) + a_token_id_, *middle_candidate = tokenizer.encode(f"a<|image_{idx}|>", + add_special_tokens=False) + assert a_token_id == a_token_id_ - def __init__(self, ctx: InputProcessingContext) -> None: - super().__init__( - ctx=ctx, - metadata=create_metadata_for_phi3v(ctx), - ) + return [start_candidate, middle_candidate] - def _get_hf_processor( - self, - *, - num_crops: Optional[int] = None, - ) -> ProcessorMixin: - if num_crops is not None: - return self.ctx.get_hf_processor(num_crops=num_crops) - return self.ctx.get_hf_processor() - - def 
_apply_hf_processor( - self, - prompt: str, - mm_data: MultiModalDataDict, - mm_processor_kwargs: Mapping[str, object], - ) -> BatchFeature: - processed_outputs = super()._apply_hf_processor( - prompt, mm_data, mm_processor_kwargs) - # Phi3v processor has inserted -1, -2 etc as placeholder in prompt_ids, - # which will cause OverflowError when decoding the prompt_ids. - # Therefore, we need to do an early replacement here - token_ids = processed_outputs['input_ids'] - token_ids[token_ids < 0] = _IMAGE_TOKEN_ID - processed_outputs['input_ids'] = token_ids - return processed_outputs - - def _get_dummy_mm_kwargs( - self, - mm_counts: Mapping[str, int], - ) -> MultiModalKwargs: - return dummy_mm_kwargs_for_phi3v(self.ctx, mm_counts) + +def input_processor_for_phi3v(ctx: InputContext, + inputs: DecoderOnlyInputs, + *, + num_crops: Optional[int] = None): + multi_modal_data = inputs.get("multi_modal_data") + if multi_modal_data is None or "image" not in multi_modal_data: + return inputs + + model_config = ctx.model_config + hf_config = ctx.get_hf_image_processor_config() + + image_data = multi_modal_data["image"] + if isinstance(image_data, Image.Image): + w, h = image_data.size + image_feature_size = [ + get_phi3v_image_feature_size(hf_config, + input_width=w, + input_height=h, + num_crops=num_crops) + ] + image_data = [image_data] + elif is_list_of(image_data, Image.Image): + image_feature_size = [] + for image in image_data: + w, h = image.size + image_feature_size.append( + get_phi3v_image_feature_size(hf_config, + input_width=w, + input_height=h, + num_crops=num_crops)) + elif isinstance(image_data, torch.Tensor): + image_feature_size = [image_data.shape[0]] + image_data = [image_data] + elif is_list_of(image_data, torch.Tensor): + image_feature_size = [item.shape[0] for item in image_data] + else: + raise TypeError(f"Invalid image type: {type(image_data)}") + + prompt = inputs.get("prompt") + if prompt is None: + # for async server request, we assume prompt and its token_ids is always + # in correct format. And num_image_tags == len(image_data) always True. + image_idx = range(1, len(image_data) + 1) + new_prompt = None + else: + image_idx = sorted(map(int, re.findall(r"<\|image_(\d+)\|>+", prompt))) + if prompt.count("<|image|>") > 0: + logger.warning("Please follow the prompt format that is " + "documented on HuggingFace which does not involve " + "repeating <|image|> tokens.") + elif (num_image_tags := len(image_idx)) > 1: + assert num_image_tags == len( + image_data), "The count of image_placeholder not match image's" + new_prompt = prompt + + prompt_token_ids = inputs["prompt_token_ids"].copy() + + # masked placeholder with image token id + for idx in image_idx: + candidates = _get_image_placeholder_token_id_candidates(model_config, + idx=idx) + + for candidate in candidates: + for i in range(len(prompt_token_ids) - len(candidate) + 1): + if prompt_token_ids[i:i + len(candidate)] == candidate: + prompt_token_ids[i:i + + len(candidate)] = ([_IMAGE_TOKEN_ID] * + len(candidate)) + break + + # merge consecutive tag ids + merged_token_ids: List[int] = [] + for is_placeholder, token_ids in itertools.groupby( + prompt_token_ids, lambda x: x == _IMAGE_TOKEN_ID): + if is_placeholder: + merged_token_ids.append(_IMAGE_TOKEN_ID) + else: + merged_token_ids.extend(list(token_ids)) + + # TODO: Move this to utils or integrate with clip. 
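# NOTE (editor's worked example, not part of the patch): what the restored
# phi3v processing above and below produces for a toy prompt. The token ids
# and feature sizes are invented; IMG stands in for _IMAGE_TOKEN_ID.
IMG = -1  # toy stand-in for _IMAGE_TOKEN_ID
prompt_token_ids = [5, IMG, IMG, 7, IMG, 9]
image_feature_size = [4, 2]  # placeholder tokens reserved for image 1 and 2

# After merging consecutive tag ids with groupby, each image is one marker:
#   [5, IMG, 7, IMG, 9]
# After expansion with repeat_and_pad_token, image 1 occupies 4 slots starting
# at offset 1 and image 2 occupies 2 slots starting at offset 6:
#   new_token_ids      -> [5, IMG, IMG, IMG, IMG, 7, IMG, IMG, 9]
#   placeholder_ranges -> [{"offset": 1, "length": 4},
#                          {"offset": 6, "length": 2}]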
+ new_token_ids: List[int] = [] + placeholder_ranges: List[PlaceholderRange] = [] + placeholder_idx = 0 + while merged_token_ids: + token_id = merged_token_ids.pop(0) + if token_id == _IMAGE_TOKEN_ID: + replacement_ids = repeat_and_pad_token( + _IMAGE_TOKEN_ID, + repeat_count=image_feature_size[placeholder_idx], + ) + placeholder_ranges.append({ + "offset": len(new_token_ids), + "length": len(replacement_ids) + }) + new_token_ids.extend(replacement_ids) + placeholder_idx += 1 + else: + new_token_ids.append(token_id) + + # NOTE: Create a defensive copy of the original inputs + return token_inputs(prompt_token_ids=new_token_ids, + prompt=new_prompt, + multi_modal_data=multi_modal_data, + multi_modal_placeholders={"image": placeholder_ranges}) +@MULTIMODAL_REGISTRY.register_image_input_mapper() @MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_phi3v_image_tokens) -@MULTIMODAL_REGISTRY.register_processor(Phi3VProcessor) +@INPUT_REGISTRY.register_dummy_data(dummy_data_for_phi3v) +@INPUT_REGISTRY.register_input_processor(input_processor_for_phi3v) class Phi3VForCausalLM(nn.Module, SupportsMultiModal, SupportsPP): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index 94a4ab882c1a9..215727cadd954 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -1,5 +1,6 @@ from dataclasses import dataclass, fields from functools import cached_property +from itertools import tee from typing import Iterable, List, Mapping, Optional, Set, Tuple, Union import numpy @@ -47,9 +48,6 @@ except ImportError: USE_XFORMERS_OPS = False -PIXTRAL_IMAGE_BREAK_ID = 12 -PIXTRAL_IMAGE_END_ID = 13 - def get_max_pixtral_image_tokens(ctx: InputContext): tokenizer = cached_get_tokenizer( @@ -70,6 +68,7 @@ def dummy_data_for_pixtral(ctx: InputContext, seq_len: int, tokenizer_mode=ctx.model_config.tokenizer_mode) mm_encoder = tokenizer.mistral.instruct_tokenizer.mm_encoder + patch_size = mm_encoder.mm_config.image_patch_size image_token_id = mm_encoder.special_ids.img mm_config = ctx.model_config.multimodal_config @@ -79,8 +78,8 @@ def dummy_data_for_pixtral(ctx: InputContext, seq_len: int, size = 256 image = Image.new("RGB", (size, size), color=0) - encoding = tokenizer.instruct.mm_encoder(ImageChunk(image=image)) - image_feature_size = len(encoding.tokens) + image_feature_size = (size**2) // (patch_size**2) + num_image_tokens = image_feature_size * num_images seq_data = SequenceData.from_prompt_token_counts( (image_token_id, num_image_tokens), @@ -102,13 +101,14 @@ def input_mapper_for_pixtral(ctx: InputContext, Args: ctx: Context of the loaded model. - data: data potentially containing PIL images to be processed - and mapped to `images`. + data: data potentially containing image/image embeddings to be mapped + to pixel_values in .forward() for a visual QWenLMHeadModel model. Returns: MultiModalKwargs containing the stacked normalized images tensor or image embeddings. 
""" + # Early exit if we have provided an image to a language only Qwen model model_config = ctx.model_config tokenizer = cached_get_tokenizer( model_config.tokenizer, tokenizer_mode=model_config.tokenizer_mode) @@ -116,67 +116,35 @@ def input_mapper_for_pixtral(ctx: InputContext, data_list = data if isinstance(data, list) else [data] images = [] - image_tokens_list = [] for image_data in data_list: image = ImageChunk(image=image_data) encoding = tokenizer.instruct.mm_encoder(image) image = torch.from_numpy(encoding.image).to(device="cuda", dtype=torch.float16) images.append(image) - image_tokens_list.append(encoding.tokens) - image_tokens = torch.tensor([ - token_id for image_tokens in image_tokens_list - for token_id in image_tokens - ]) - return MultiModalKwargs({"images": images, "image_tokens": image_tokens}) + return MultiModalKwargs({"images": images}) def input_processor_for_pixtral(ctx: InputContext, inputs: DecoderOnlyInputs): multi_modal_data = inputs.get("multi_modal_data") - if multi_modal_data is None or "image" not in multi_modal_data: - return inputs + if multi_modal_data is not None and "image" in multi_modal_data: + tokenizer = cached_get_tokenizer( + ctx.model_config.tokenizer, + tokenizer_mode=ctx.model_config.tokenizer_mode) - prompt_token_ids = inputs.get("prompt_token_ids") - prompt = inputs.get("prompt") - tokenizer = cached_get_tokenizer( - ctx.model_config.tokenizer, - tokenizer_mode=ctx.model_config.tokenizer_mode) + mm_encoder = tokenizer.mistral.instruct_tokenizer.mm_encoder + image_token_id = mm_encoder.special_ids.img - mm_encoder = tokenizer.mistral.instruct_tokenizer.mm_encoder - image_token_id = mm_encoder.special_ids.img - image_break_id = mm_encoder.special_ids.img_break - image_end_id = mm_encoder.special_ids.img_end - - if image_token_id not in inputs['prompt_token_ids']: - raise ValueError( - f"You've passed {inputs=} without {image_token_id=}" - " Make sure to process your input via mistral_common's" - " tokenizer or pass a chat completion request. For more" - " For more info, see: " - "https://github.com/vllm-project/vllm/issues/8411.") - - # Get precise tracking of placeholder positions - placeholder_ranges = [] - curr_offset = -1 - curr_length = 0 - for i in range(len(prompt_token_ids)): - if prompt_token_ids[i] in (image_token_id, image_break_id): - if curr_offset < 0: - curr_offset = i - curr_length += 1 - elif prompt_token_ids[i] == image_end_id: - curr_length += 1 - placeholder_ranges.append( - PlaceholderRange(offset=curr_offset, length=curr_length)) - curr_offset = -1 - curr_length = 0 - else: - pass - return token_inputs(prompt=prompt, - prompt_token_ids=prompt_token_ids, - multi_modal_data=multi_modal_data, - multi_modal_placeholders={"image": placeholder_ranges}) + if image_token_id not in inputs['prompt_token_ids']: + raise ValueError( + f"You've passed {inputs=} without {image_token_id=}" + " Make sure to process your input via mistral_common's" + " tokenizer or pass a chat completion request. 
For more" + " For more info, see: " + "https://github.com/vllm-project/vllm/issues/8411.") + + return inputs @MULTIMODAL_REGISTRY.register_image_input_mapper(input_mapper_for_pixtral) @@ -224,29 +192,11 @@ def sampler(self): return get_sampler() def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: - image_input, image_tokens = self._parse_and_validate_image_input( - **kwargs) + image_input = self._parse_and_validate_image_input(**kwargs) if image_input is None: return None - vision_embeddings = self._process_image_input(image_input) - - # NOTE: We patch the outputs of the vision encoder with embeddings - # from `[IMG_BREAK]` and `[IMG_END]` tokens. - image_embeds = self.language_model.get_input_embeddings(image_tokens) - image_token_mask = image_tokens == self.vision_args.image_token_id - image_embeds[image_token_mask] = vision_embeddings - - # NOTE: Image embeddings are split into separate tensors for each image - # by the indices of `[IMG_END]` token. - split_indices = torch.where( - image_tokens == PIXTRAL_IMAGE_END_ID)[0] + 1 - if len(split_indices) <= 1: - # Do not split, return as tensor of shape [1, fs, hs] - return image_embeds.unsqueeze(0) - - image_embeds = image_embeds.tensor_split(split_indices.cpu()) - return image_embeds + return vision_embeddings def get_input_embeddings( self, @@ -256,10 +206,8 @@ def get_input_embeddings( inputs_embeds = self.language_model.get_input_embeddings(input_ids) if multimodal_embeddings is not None: inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, multimodal_embeddings, [ - self.vision_args.image_token_id, PIXTRAL_IMAGE_END_ID, - PIXTRAL_IMAGE_BREAK_ID - ]) + input_ids, inputs_embeds, multimodal_embeddings, + self.vision_args.image_token_id) return inputs_embeds def forward( @@ -297,11 +245,10 @@ def forward( def _parse_and_validate_image_input( self, images: Optional[Union[List[List[torch.Tensor]], List[torch.Tensor], - torch.Tensor]] = None, - image_tokens: Optional[torch.Tensor] = None, + torch.Tensor]] = None ) -> Optional[List[torch.Tensor]]: if images is None: - return None, None + return None if isinstance(images, torch.Tensor): # if passed as batch take all images @@ -320,16 +267,7 @@ def _parse_and_validate_image_input( images = flatten_images - if isinstance(image_tokens, torch.Tensor): - # image_tokens are batched - image_tokens = image_tokens.flatten() - elif isinstance(image_tokens, list): - # image_tokens are of different lengths thus passed as a list - image_tokens = torch.cat(image_tokens) - - assert image_tokens.dim() == 1 - - return images, image_tokens + return images def _process_image_input(self, image_input: List[torch.Tensor]) -> torch.Tensor: @@ -358,33 +296,38 @@ def is_vision_encoder_weights(weight: Tuple[str, torch.Tensor]): def is_vision_lang_adapter_weights(weight: Tuple[str, torch.Tensor]): return weight[0].startswith("vision_language_adapter") - # Get references to parameters for direct loading + def is_vision_weights(weight: Tuple[str, torch.Tensor]): + return is_vision_encoder_weights( + weight) or is_vision_lang_adapter_weights(weight) + + llm_weights, vision_encoder_weights, vision_lang_adapter_weights = tee( + weights, 3) + + # llm + llm_weights = filter(lambda x: not is_vision_weights(x), llm_weights) + self.language_model.load_weights(llm_weights) + + # vision encoder + vision_encoder_weights = filter(is_vision_encoder_weights, + vision_encoder_weights) vision_encoder_dict = dict(self.vision_encoder.named_parameters()) - vision_lang_adapter_dict = dict( - 
self.vision_language_adapter.named_parameters()) + for name, loaded_weight in vision_encoder_weights: + # cut 'vision_encoder.' + name = '.'.join(name.split(".")[1:]) + param = vision_encoder_dict[name] + + default_weight_loader(param, loaded_weight) - def llm_weights_generator(): - # Single pass over weights - for name, w in weights: - if is_vision_encoder_weights((name, w)): - # Load vision encoder weights directly - trimmed_name = '.'.join(name.split(".")[1:]) - param = vision_encoder_dict[trimmed_name] - with torch.no_grad(): - default_weight_loader(param, w) - elif is_vision_lang_adapter_weights((name, w)): - # Load vision-language adapter weights directly - trimmed_name = '.'.join(name.split(".")[1:]) - param = vision_lang_adapter_dict[trimmed_name] - with torch.no_grad(): - default_weight_loader(param, w) - else: - # LLM weights: yield them to be loaded - # by language_model.load_weights - yield (name, w) - - # Now we call the language model load with the generator - self.language_model.load_weights(llm_weights_generator()) + # adapter + vision_lang_adapter_weights = filter(is_vision_lang_adapter_weights, + vision_lang_adapter_weights) + vision_lang_adpter_dict = dict( + self.vision_language_adapter.named_parameters()) + for name, loaded_weight in vision_lang_adapter_weights: + # cut 'vision_language_adapter.' + name = '.'.join(name.split(".")[1:]) + param = vision_lang_adpter_dict[name] + default_weight_loader(param, loaded_weight) # Vision encoder diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 9e34f2820cff9..c30cb396b96f5 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -449,17 +449,14 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.model = Qwen2Model(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) - if get_pp_group().is_last_rank: - if config.tie_word_embeddings: - self.lm_head = self.model.embed_tokens - else: - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config, - prefix=maybe_prefix( - prefix, "lm_head")) + if config.tie_word_embeddings: + self.lm_head = self.model.embed_tokens else: - self.lm_head = PPMissingLayer() + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + quant_config=quant_config, + prefix=maybe_prefix( + prefix, "lm_head")) self.logits_processor = LogitsProcessor(config.vocab_size) self.sampler = get_sampler() diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index 48a2d470414b9..a0605fee82aca 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -19,7 +19,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
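# NOTE (editor's illustrative sketch, not part of the patch): the restored
# Pixtral load_weights above fans a single weight iterator out with
# itertools.tee and filters each copy by name, while the removed code routed
# everything in one pass through a generator. A minimal version of the
# tee-and-filter routing; the function name and prefix are made up.
from itertools import tee


def split_weights_by_prefix(weights, prefix: str):
    """Return (matching, rest) iterators over (name, tensor) pairs."""
    a, b = tee(weights, 2)
    matching = (kv for kv in a if kv[0].startswith(prefix))
    rest = (kv for kv in b if not kv[0].startswith(prefix))
    return matching, rest

# Usage sketch: vision, llm = split_weights_by_prefix(weights, "vision_encoder.")
# Trade-off: tee buffers items consumed at different rates, whereas the
# single-pass generator variant never holds extra weights in memory.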
"""Inference-only Qwen2-Audio model compatible with HuggingFace weights.""" -from functools import cached_property, lru_cache +from functools import lru_cache from typing import (Iterable, List, Mapping, Optional, Set, Tuple, TypedDict, Union) @@ -34,7 +34,12 @@ from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) from vllm.logger import init_logger +from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead +from vllm.model_executor.model_loader.weight_utils import ( + default_weight_loader, maybe_remap_kv_scale_name) +from vllm.model_executor.models.qwen2 import Qwen2Model from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.inputs import NestedTensors @@ -42,11 +47,15 @@ from vllm.sequence import IntermediateTensors, SequenceData from .interfaces import SupportsMultiModal, SupportsPP -from .utils import (AutoWeightsLoader, init_vllm_registered_model, - maybe_prefix, merge_multimodal_embeddings) +from .utils import merge_multimodal_embeddings logger = init_logger(__name__) +_KEYS_TO_MODIFY_MAPPING = { + "language_model.lm_head": "lm_head", + "language_model.model": "language_model", +} + # # === Audio Inputs === # class Qwen2AudioInputs(TypedDict): @@ -272,23 +281,25 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.quant_config = quant_config - self.language_model = init_vllm_registered_model( - vllm_config=vllm_config, - hf_config=config.text_config, - prefix=maybe_prefix(prefix, "language_model"), - architectures=["Qwen2ForCausalLM"], - ) + self.language_model = Qwen2Model( + vllm_config=vllm_config.with_hf_config(config.text_config), + prefix=prefix) + self.unpadded_vocab_size = config.text_config.vocab_size + if config.text_config.tie_word_embeddings: + self.lm_head = self.language_model.embed_tokens + else: + self.lm_head = ParallelLMHead(config.text_config.vocab_size, + config.text_config.hidden_size, + quant_config=quant_config) + logit_scale = getattr(config, "logit_scale", 1.0) + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, + config.text_config.vocab_size, + logit_scale) + self.sampler = get_sampler() self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) - @cached_property - def sampler(self): - if hasattr(self.language_model, "sampler"): - return self.language_model.sampler - - return get_sampler() - def _validate_and_reshape_mm_tensor(self, mm_input: Union[torch.Tensor, List[torch.Tensor]], @@ -403,30 +414,72 @@ def forward( multimodal_embeddings) input_ids = None - hidden_states = self.language_model.model(input_ids, - positions, - kv_caches, - attn_metadata, - intermediate_tensors, - inputs_embeds=inputs_embeds) + hidden_states = self.language_model(input_ids, + positions, + kv_caches, + attn_metadata, + intermediate_tensors, + inputs_embeds=inputs_embeds) return hidden_states - def compute_logits( - self, - hidden_states: torch.Tensor, - sampling_metadata: SamplingMetadata, - ) -> Optional[torch.Tensor]: - return self.language_model.compute_logits(hidden_states, - sampling_metadata) + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + 
return logits def sample( self, logits: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[SamplerOutput]: - return self.language_model.sample(logits, sampling_metadata) + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: - loader = AutoWeightsLoader(self) - return loader.load_weights(weights) + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + if (self.config.text_config.tie_word_embeddings + and "lm_head.weight" in name): + continue + for key_to_modify, new_key in _KEYS_TO_MODIFY_MAPPING.items(): + if key_to_modify in name: + name = name.replace(key_to_modify, new_key) + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name or 'audio' in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + # Remapping the name of FP8 kv-scale. + name = maybe_remap_kv_scale_name(name, params_dict) + if name is None: + continue + + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index cfc90cdab01e4..27175dbae7483 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -21,7 +21,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
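# NOTE (editor's illustrative sketch, not part of the patch): the
# stacked_params_mapping loop restored above renames per-projection HF weights
# (q_proj/k_proj/v_proj, gate_proj/up_proj) onto vLLM's fused parameters
# (qkv_proj, gate_up_proj) and hands the shard id to that parameter's
# weight_loader, which copies the tensor into its slice of the fused weight.
# A stripped-down version of just the name-rewriting step (the real loop also
# skips audio-tower weights and missing GPTQ biases):
from typing import Optional, Tuple, Union

STACKED_PARAMS_MAPPING = [
    # (fused param, original shard name, shard id)
    ("qkv_proj", "q_proj", "q"),
    ("qkv_proj", "k_proj", "k"),
    ("qkv_proj", "v_proj", "v"),
    ("gate_up_proj", "gate_proj", 0),
    ("gate_up_proj", "up_proj", 1),
]


def remap_stacked_name(name: str) -> Tuple[str, Optional[Union[str, int]]]:
    """Return (new_name, shard_id), or (name, None) if the weight is not fused."""
    for fused, shard_name, shard_id in STACKED_PARAMS_MAPPING:
        if shard_name in name:
            return name.replace(shard_name, fused), shard_id
    return name, None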
"""Inference-only Qwen2-VL model compatible with HuggingFace weights.""" -from functools import cached_property, partial +from functools import partial from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, Optional, Set, Tuple, Type, TypedDict, Union) @@ -40,7 +40,7 @@ from vllm.attention import AttentionMetadata from vllm.config import VllmConfig -from vllm.distributed import parallel_state +from vllm.distributed import get_pp_group, parallel_state from vllm.distributed import utils as dist_utils from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) @@ -49,12 +49,15 @@ from vllm.model_executor.layers.activation import QuickGELU from vllm.model_executor.layers.linear import (ColumnParallelLinear, RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.quantization.gptq import GPTQConfig from vllm.model_executor.layers.quantization.gptq_marlin import ( GPTQMarlinConfig) from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.models.qwen2 import Qwen2Model from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.image import cached_get_image_processor from vllm.multimodal.inputs import (MultiModalData, MultiModalDataDict, @@ -66,8 +69,9 @@ from vllm.transformers_utils.processor import cached_get_processor from .interfaces import SupportsLoRA, SupportsMultiModal, SupportsPP -from .utils import (AutoWeightsLoader, WeightsMapper, get_vit_attn_backend, - init_vllm_registered_model, maybe_prefix) +from .utils import (PPMissingLayer, get_vit_attn_backend, + is_pp_missing_parameter, + make_empty_intermediate_tensors_factory, maybe_prefix) logger = init_logger(__name__) @@ -502,8 +506,6 @@ def __init__( mlp_ratio: float = vision_config.mlp_ratio self.spatial_merge_size = spatial_merge_size - self.num_heads = num_heads - self.embed_dim = embed_dim self.patch_embed = Qwen2VisionPatchEmbed( patch_size=patch_size, @@ -593,53 +595,6 @@ def forward( x = self.merger(x) return x - def load_weights(self, weights: Iterable[Tuple[str, - torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ] - params_dict = dict(self.named_parameters(remove_duplicate=False)) - loaded_params: Set[str] = set() - - for name, loaded_weight in weights: - for (param_name, weight_name, shard_id) in stacked_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - if name.endswith("qkv.weight"): - visual_num_heads = self.num_heads - visual_embed_dim = self.embed_dim - head_size = visual_embed_dim // visual_num_heads - loaded_weight = loaded_weight.view(3, visual_num_heads, - head_size, - visual_embed_dim) - loaded_weight = loaded_weight.transpose(0, 1) - loaded_weight = loaded_weight.reshape(-1, visual_embed_dim) - elif name.endswith("qkv.bias"): - visual_num_heads = self.num_heads - visual_embed_dim = self.embed_dim - head_size = visual_embed_dim // visual_num_heads - loaded_weight = loaded_weight.view(3, 
visual_num_heads, - head_size) - loaded_weight = loaded_weight.transpose(0, 1) - loaded_weight = loaded_weight.reshape(-1) - - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params - # === Vision input helpers === # @@ -1127,21 +1082,27 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): prefix=maybe_prefix(prefix, "visual"), ) - self.language_model = init_vllm_registered_model( - vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model"), - architectures=["Qwen2ForCausalLM"], - ) + self.model = Qwen2Model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) - self.make_empty_intermediate_tensors = ( - self.language_model.make_empty_intermediate_tensors) + if get_pp_group().is_last_rank: + if config.tie_word_embeddings: + self.lm_head = self.model.embed_tokens + else: + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + quant_config=quant_config, + prefix=maybe_prefix( + prefix, "lm_head")) + else: + self.lm_head = PPMissingLayer() - @cached_property - def sampler(self): - if hasattr(self.language_model, "sampler"): - return self.language_model.sampler + self.logits_processor = LogitsProcessor(config.vocab_size) + self.sampler = get_sampler() - return get_sampler() + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory( + ["hidden_states", "residual"], config.hidden_size)) def _maybe_ignore_quant_config(self, quant_config: QuantizationConfig): # GPTQ configs do not have a list of ignored modules, however AutoGPTQ @@ -1300,7 +1261,7 @@ def get_input_embeddings( multimodal_embeddings: Optional[List[Tuple[NestedTensors, str]]] = None, ) -> torch.Tensor: - inputs_embeds = self.language_model.get_input_embeddings(input_ids) + inputs_embeds = self.model.get_input_embeddings(input_ids) if multimodal_embeddings is not None: for embeddings, modality in multimodal_embeddings: if modality == "image": @@ -1369,7 +1330,7 @@ def forward( multimodal_embeddings) input_ids = None - hidden_states = self.language_model.model( + hidden_states = self.model( input_ids=input_ids, positions=positions, kv_caches=kv_caches, @@ -1379,28 +1340,80 @@ def forward( ) return hidden_states - def compute_logits( - self, - hidden_states: torch.Tensor, - sampling_metadata: SamplingMetadata, - ) -> Optional[torch.Tensor]: - return self.language_model.compute_logits(hidden_states, - sampling_metadata) + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + return logits def sample( self, logits: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[SamplerOutput]: - return self.language_model.sample(logits, sampling_metadata) + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: - hf_to_vllm_mapper = WeightsMapper( - orig_to_new_prefix={ - "lm_head.": "language_model.lm_head.", - "model.": "language_model.model.", - }) - - loader = AutoWeightsLoader(self) - return loader.load_weights(weights, mapper=hf_to_vllm_mapper) + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "up_proj", 1), + ("gate_up_proj", "gate_proj", 0), + ] + 
params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + if self.config.tie_word_embeddings and "lm_head.weight" in name: + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + if "visual" in name and name.endswith("qkv.weight"): + visual_num_heads = self.config.vision_config.num_heads + visual_embed_dim = self.config.vision_config.embed_dim + head_size = visual_embed_dim // visual_num_heads + loaded_weight = loaded_weight.view(3, visual_num_heads, + head_size, + visual_embed_dim) + loaded_weight = loaded_weight.transpose(0, 1) + loaded_weight = loaded_weight.reshape(-1, visual_embed_dim) + elif "visual" in name and name.endswith("qkv.bias"): + visual_num_heads = self.config.vision_config.num_heads + visual_embed_dim = self.config.vision_config.embed_dim + head_size = visual_embed_dim // visual_num_heads + loaded_weight = loaded_weight.view(3, visual_num_heads, + head_size) + loaded_weight = loaded_weight.transpose(0, 1) + loaded_weight = loaded_weight.reshape(-1) + try: + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + except KeyError: + raise ValueError(f"Unexpected weight: {name}") from None + + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index e69596aa915b5..c66fbce018a62 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -152,7 +152,6 @@ "LlavaNextForConditionalGeneration": ("llava_next", "LlavaNextForConditionalGeneration"), # noqa: E501 "LlavaNextVideoForConditionalGeneration": ("llava_next_video", "LlavaNextVideoForConditionalGeneration"), # noqa: E501 "LlavaOnevisionForConditionalGeneration": ("llava_onevision", "LlavaOnevisionForConditionalGeneration"), # noqa: E501 - "MantisForConditionalGeneration": ("llava", "MantisForConditionalGeneration"), # noqa: E501 "MiniCPMV": ("minicpmv", "MiniCPMV"), "MolmoForCausalLM": ("molmo", "MolmoForCausalLM"), "NVLM_D": ("nvlm_d", "NVLM_D_Model"), diff --git a/vllm/model_executor/models/solar.py b/vllm/model_executor/models/solar.py index caae0b65d7d10..f58710d215056 100644 --- a/vllm/model_executor/models/solar.py +++ b/vllm/model_executor/models/solar.py @@ -443,11 +443,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) + self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() - self.sampler = get_sampler() - self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 269b66806adf4..7a1e1f9bf2be4 100644 --- a/vllm/model_executor/models/utils.py +++ 
b/vllm/model_executor/models/utils.py @@ -17,7 +17,7 @@ from vllm.multimodal import MultiModalPlaceholderMap, NestedTensors from vllm.platforms import _Backend, current_platform from vllm.sequence import IntermediateTensors -from vllm.utils import is_pin_memory_available, print_warning_once +from vllm.utils import is_pin_memory_available logger = init_logger(__name__) @@ -251,15 +251,12 @@ def init_vllm_registered_model( """ from vllm.model_executor.model_loader.loader import _initialize_model - if hf_config is None and architectures is not None: - # So that the architectures field is overridden - hf_config = vllm_config.model_config.hf_config - if hf_config is not None: - vllm_config = vllm_config.with_hf_config(hf_config, - architectures=architectures) + vllm_config = vllm_config.with_hf_config(hf_config) - return _initialize_model(vllm_config=vllm_config, prefix=prefix) + return _initialize_model(vllm_config=vllm_config, + prefix=prefix, + architectures=architectures) @overload @@ -409,42 +406,16 @@ def merge_multimodal_embeddings( input_ids: torch.Tensor, inputs_embeds: torch.Tensor, multimodal_embeddings: NestedTensors, - placeholder_token_id: Union[int, List[int]], + placeholder_token_id: int, ) -> torch.Tensor: """ Merge ``multimodal_embeddings`` into ``inputs_embeds`` by overwriting the positions in ``inputs_embeds`` corresponding to placeholder tokens in ``input_ids``. - - ``placeholder_token_id`` can be a list of token ids (e.g, token ids - of img_start, img_break, and img_end tokens) when needed: This means - the order of these tokens in the ``input_ids`` MUST MATCH the order of - their embeddings in ``multimodal_embeddings`` since we need to - slice-merge instead of individually scattering. - - For example, if input_ids is "TTTTTSIIIBIIIBIIIETTT", where - - T is text token - - S is image start token - - I is image embedding token - - B is image break token - - E is image end token. - - Then the image embeddings (that correspond to I's) from vision encoder - must be padded with embeddings of S, B, and E in the same order of - input_ids for a correct embedding merge. Note: This updates ``inputs_embeds`` in place. """ - if isinstance(placeholder_token_id, list): - placeholder_token_id = torch.tensor(placeholder_token_id, - device=input_ids.device) - return _merge_multimodal_embeddings( - inputs_embeds, - torch.isin(input_ids, placeholder_token_id), - multimodal_embeddings, - ) - return _merge_multimodal_embeddings( inputs_embeds, (input_ids == placeholder_token_id), @@ -621,7 +592,7 @@ def get_vit_attn_backend(support_fa: bool = False) -> _Backend: if is_flash_attn_2_available(): selected_backend = _Backend.FLASH_ATTN else: - print_warning_once( + logger.warning( "Current `vllm-flash-attn` has a bug inside vision module, " "so we use xformers backend instead. 
You can run " "`pip install flash-attn` to use flash-attention backend.") diff --git a/vllm/multimodal/base.py b/vllm/multimodal/base.py index 7dba94b885b6d..f93722523728d 100644 --- a/vllm/multimodal/base.py +++ b/vllm/multimodal/base.py @@ -226,16 +226,16 @@ def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: """ # Avoid circular import from vllm.model_executor.model_loader import get_model_architecture - from vllm.model_executor.models import supports_multimodal model_cls, _ = get_model_architecture(model_config) - if not supports_multimodal(model_cls): + if model_cls not in self._input_mappers: return 0 max_mm_tokens = self._max_mm_tokens.get(model_cls) if max_mm_tokens is None: - return 0 + raise KeyError(f"No maximum number of multi-modal tokens is given " + f"for model class {model_cls.__name__} in {self}.") if callable(max_mm_tokens): mm_processor_kwargs = get_allowed_kwarg_only_overrides( @@ -326,47 +326,26 @@ def from_seq_group( src_ranges = [] dest_ranges = [] """ - seq_mm_data = seq_group.multi_modal_data - seq_mm_placeholders = seq_group.multi_modal_placeholders - - if not seq_mm_data or not seq_mm_placeholders: - return seq_mm_data, {} - - # For merged processor, we directly use mm_kwargs as mm_data - if isinstance(seq_mm_data, MultiModalKwargs): - placeholder_maps = dict[str, MultiModalPlaceholderMap]() - - for modality, placeholders in seq_mm_placeholders.items(): - placeholder_map = MultiModalPlaceholderMap() - - if positions: - placeholder_map.append_items_from_seq_group( - positions, - # Dummy, since we don't care about intersecting items - [None] * len(placeholders), - placeholders, - ) - - placeholder_maps[modality] = placeholder_map - - return seq_mm_data, placeholder_maps + if (not seq_group.multi_modal_data + or not seq_group.multi_modal_placeholders): + return seq_group.multi_modal_data, {} - mm_data = {**seq_mm_data} - placeholder_maps = defaultdict[str, MultiModalPlaceholderMap]( + mm_data = {**seq_group.multi_modal_data} + placeholder_maps: Dict[str, MultiModalPlaceholderMap] = defaultdict( MultiModalPlaceholderMap) - for modality, placeholders in seq_mm_placeholders.items(): + for ( + modality, + placeholders, + ) in seq_group.multi_modal_placeholders.items(): mm_items = mm_data.pop(modality) if not isinstance(mm_items, list): mm_items = [mm_items] if positions: - intersecting_items = placeholder_maps[modality] \ - .append_items_from_seq_group( - positions, - mm_items, - placeholders, - ) + intersecting_items = placeholder_maps[ + modality].append_items_from_seq_group( + positions, mm_items, placeholders) if intersecting_items: mm_data[modality] = intersecting_items diff --git a/vllm/multimodal/inputs.py b/vllm/multimodal/inputs.py index 229a8fbdf5831..640c7c04b8817 100644 --- a/vllm/multimodal/inputs.py +++ b/vllm/multimodal/inputs.py @@ -96,8 +96,7 @@ class PlaceholderRange(TypedDict): """The length of the placeholder.""" -NestedTensors = Union[List["NestedTensors"], List[torch.Tensor], torch.Tensor, - Tuple[torch.Tensor, ...]] +NestedTensors = Union[List["NestedTensors"], List[torch.Tensor], torch.Tensor] """ Uses a list instead of a tensor if the dimensions of each element do not match. 
""" diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index 922c83b6fd8a9..28c8dda581982 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -3,17 +3,16 @@ from collections.abc import Callable, ItemsView, Iterable, Mapping, Sequence from dataclasses import dataclass from functools import lru_cache -from typing import (Any, Dict, Generic, NamedTuple, Optional, Protocol, - TypeVar, Union, cast) +from itertools import groupby +from typing import Any, Generic, NamedTuple, Optional, Protocol, TypeVar, Union -import torch -from transformers import BatchFeature, ProcessorMixin +import numpy as np +from transformers import BatchFeature from typing_extensions import TypeAlias, TypedDict -from vllm.inputs import DummyData, InputProcessingContext +from vllm.inputs import InputProcessingContext from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer -from vllm.utils import (flatten_2d_lists, full_groupby, is_list_of, - resolve_mm_processor_kwargs) +from vllm.utils import flatten_2d_lists, full_groupby, is_list_of from .inputs import (AudioItem, ImageItem, MultiModalDataDict, MultiModalInputsV2, MultiModalKwargs, PlaceholderRange, @@ -257,6 +256,63 @@ def to_multi_format(data: MultiModalDataDict) -> dict[str, list[Any]]: return multi_data +class _TokenRun(NamedTuple): + token_id: int + + start_idx: int + length: int + + +def iter_token_runs(token_ids: list[int]) -> Iterable[_TokenRun]: + """ + Yield the starting index and length of each run of tokens that are the same. + """ + start_idx = 0 + + for token_id, it in groupby(token_ids): + length = sum(1 for _ in it) + yield _TokenRun(token_id=token_id, start_idx=start_idx, length=length) + + start_idx += length + + +class _PlaceholderInfo(NamedTuple): + modality: str + offset: int + length: int + + def to_range(self) -> PlaceholderRange: + return PlaceholderRange(offset=self.offset, length=self.length) + + +def iter_placeholders( + prompt_repls: Sequence[_BoundPromptReplacement[Any]], + token_ids: list[int], + *, + min_placeholder_count: int, +) -> Iterable[_PlaceholderInfo]: + """Yield each set of placeholder tokens found in :code:`token_ids`.""" + placeholder_ids_by_modality = { + modality: { + token_id + for prompt_repl in repls + for token_id in prompt_repl.repl_unit.token_ids + } + for modality, repls in full_groupby_modality(prompt_repls) + } + + for run_info in iter_token_runs(token_ids): + if run_info.length > min_placeholder_count: + for (modality, + placeholder_ids) in placeholder_ids_by_modality.items(): + if run_info.token_id in placeholder_ids: + yield _PlaceholderInfo( + modality=modality, + offset=run_info.start_idx, + length=run_info.length, + ) + + class _TokenMatch(NamedTuple): start_idx: int end_idx: int @@ -297,9 +353,13 @@ def start_idx(self) -> int: def end_idx(self) -> int: raise NotImplementedError - @property @abstractmethod - def repl_unit(self) -> _S: + def get_repl( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> _S: raise NotImplementedError def __repr__(self) -> str: @@ -320,9 +380,15 @@ def start_idx(self) -> int: def end_idx(self) -> int: return self.match.end_idx - @property - def repl_unit(self) -> list[int]: - return self.prompt_repl.repl_unit.token_ids + def get_repl( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> list[int]: + prompt_repl = self.prompt_repl + count = prompt_repl.get_count(mm_items, hf_inputs, item_idx) + return prompt_repl.repl_unit.token_ids * count 
@dataclass(repr=False) @@ -338,26 +404,15 @@ def start_idx(self) -> int: def end_idx(self) -> int: return self.match.end() - @property - def repl_unit(self) -> str: - return self.prompt_repl.repl_unit.text - - -class _PlaceholderInfo(NamedTuple): - modality: str - start_idx: int - unit: list[int] - unit_count: int - - @property - def length(self) -> int: - return len(self.unit) * self.unit_count - - def to_range(self) -> PlaceholderRange: - return PlaceholderRange( - offset=self.start_idx, - length=self.length, - ) + def get_repl( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> str: + prompt_repl = self.prompt_repl + count = prompt_repl.get_count(mm_items, hf_inputs, item_idx) + return prompt_repl.repl_unit.text * count def find_token_matches( @@ -392,17 +447,15 @@ def _resolve_matches( Resolve :code:`matches` to ensure that there are no overlapping matches, and sort them such that earlier matches take priority over later ones. """ - seen_matches: list[Optional[_PromptReplacementMatch[_T, _S]]] \ - = [None] * len(prompt) - + num_matches_by_idx = np.zeros(len(prompt), dtype=int) for match in matches: - for idx in range(match.start_idx, match.end_idx): - if seen_matches[idx] is not None: - raise ValueError("Found overlapping matches " - f"({seen_matches[idx]} and {match}) " - f"at index={idx} of prompt={prompt}") + num_matches_by_idx[match.start_idx:match.end_idx] += 1 - seen_matches[idx] = match + duplicate_matches_idxs, = np.nonzero(num_matches_by_idx > 1) + if len(duplicate_matches_idxs) > 0: + raise ValueError("Unable to find a unique replacement " + f"at indices={duplicate_matches_idxs} " + f"of prompt={prompt}") return sorted(matches, key=lambda x: x.start_idx) @@ -427,12 +480,9 @@ def _replace_matches( start_idx = match.start_idx end_idx = match.end_idx - repl_unit = match.repl_unit - repl_info = match.prompt_repl - repl_count = repl_info.get_count(mm_items, hf_inputs, item_idx) + repl_ids = match.get_repl(mm_items, hf_inputs, item_idx) - out_seqs.append(prompt[prev_end_idx:start_idx] + - repl_unit * repl_count) + out_seqs.append(prompt[prev_end_idx:start_idx] + repl_ids) prev_end_idx = end_idx next_idx_by_modality[modality] += 1 @@ -481,59 +531,9 @@ def replace_text_matches( return "".join(texts) -def _merge_placeholder_matches( - matches: Iterable[_PromptReplacementTokenMatch], -) -> Iterable[_PromptReplacementTokenMatch]: - current_match = None - - for match in sorted(matches, key=lambda x: x.start_idx): - if current_match is None: - current_match = match - elif (current_match.prompt_repl == match.prompt_repl - and current_match.end_idx == match.start_idx): - current_match = _PromptReplacementTokenMatch( - current_match.prompt_repl, - match=_TokenMatch(current_match.start_idx, match.end_idx), - ) - else: - yield current_match - current_match = match - - if current_match is not None: - yield current_match - - -def iter_placeholders( - prompt_repls: Sequence[_BoundPromptReplacement[Any]], - prompt: list[int], - *, - min_unit_count: int = 1, -) -> Iterable[_PlaceholderInfo]: - """Yield each set of placeholder tokens found in :code:`token_ids`.""" - if min_unit_count <= 0: - raise ValueError("`min_unit_count` must be a positive integer") - - matches = (_PromptReplacementTokenMatch(prompt_repl, match) - for prompt_repl in prompt_repls - if len(repl_unit := prompt_repl.repl_unit.token_ids) > 0 - for match in iter_token_matches(prompt, repl_unit)) - - for match in _merge_placeholder_matches(matches): - unit = match.repl_unit - placeholder = 
_PlaceholderInfo( - modality=match.modality, - start_idx=match.start_idx, - unit=unit, - unit_count=(match.end_idx - match.start_idx) // len(unit), - ) - - if placeholder.unit_count >= min_unit_count: - yield placeholder - - -class BaseMultiModalProcessor(ABC): +class MultiModalProcessor: """ - Abstract base class to process multi-modal inputs to be used in vLLM. + Helper class to process multi-modal inputs to be used in vLLM. """ def __init__( @@ -545,18 +545,6 @@ def __init__( self.ctx = ctx self.metadata = metadata - self.init_mm_processor_kwargs = (ctx.model_config.mm_processor_kwargs - or {}) - - def _get_hf_processor( - self, - **mm_processor_kwargs: Mapping[str, object], - ) -> ProcessorMixin: - # by default, we won't pass any kwargs to the processor initialization - return self.ctx.get_hf_processor() - - def _get_tokenizer(self) -> AnyTokenizer: - return self.ctx.tokenizer def __call__( self, @@ -574,13 +562,13 @@ def _find_placeholders( # To avoid false positives from multi-input when detecting # whether placeholder tokens have been inserted, in case # the target sequence is a subset of the replacement tokens - min_unit_count: int = 16, + min_placeholder_count: int = 16, ) -> list[_PlaceholderInfo]: return list( iter_placeholders( all_prompt_repls, new_token_ids, - min_unit_count=min_unit_count, + min_placeholder_count=min_placeholder_count, )) def _apply_hf_processor( @@ -589,62 +577,19 @@ def _apply_hf_processor( mm_data: MultiModalDataDict, mm_processor_kwargs: Mapping[str, object], ) -> BatchFeature: - # some mm_processor_kwargs may be used in processor initialization - # instead of processor call - processor_init_kwargs = { - **self.init_mm_processor_kwargs, + hf_processor = self.ctx.get_hf_processor() + + return hf_processor( + text=prompt, # type: ignore + **mm_data, **mm_processor_kwargs, - } - hf_processor = self._get_hf_processor(**processor_init_kwargs) - - processor_data = dict[str, Any]() - passthrough_data = dict[str, Any]() - for k, v in mm_data.items(): - # TODO: Make a separate modality for embedding inputs - # to avoid confusion - if k in ("image", "video", "audio"): - if isinstance(v, torch.Tensor) and v.ndim == 3: - # Pass through embedding inputs (single) - passthrough_data[f"{k}_embeds"] = [v] - elif is_list_of(v, torch.Tensor) and v[0].ndim == 2: - # Pass through embedding inputs (multi) - passthrough_data[f"{k}_embeds"] = v - else: - # Map keys to plural form, e.g.: image -> images - processor_data[f"{k}s"] = v - else: - processor_data[k] = v - - # filter mm_processor_kwargs used in processor call - mm_processor_kwargs = resolve_mm_processor_kwargs( - self.init_mm_processor_kwargs, - cast(Dict[str, Any], mm_processor_kwargs), - hf_processor, ) - try: - hf_inputs = hf_processor( - text=prompt, # type: ignore - **processor_data, - **mm_processor_kwargs, - return_tensors="pt", - ) - except Exception as exc: - data = dict(text=prompt, **processor_data) - - raise RuntimeError( - f"Failed to apply {type(hf_processor).__name__} " - f"on data={data} with kwargs={mm_processor_kwargs}") from exc - - hf_inputs.update(passthrough_data) - - return hf_inputs - def _bind_prompt_replacements( self, mm_data: MultiModalDataDict, ) -> list[_BoundPromptReplacement[Any]]: - tokenizer = self._get_tokenizer() + tokenizer = self.ctx.tokenizer return [ prompt_repl.bind(modality, tokenizer) @@ -659,7 +604,7 @@ def _apply_prompt_replacements( token_ids: list[int], prompt_repls: Sequence[_BoundPromptReplacement[Any]], ) -> tuple[list[int], str, list[_PlaceholderInfo]]: - tokenizer = 
self._get_tokenizer() + tokenizer = self.ctx.tokenizer mm_items = to_multi_format(mm_data) token_matches = find_token_matches(token_ids, prompt_repls) @@ -675,7 +620,7 @@ def _apply_prompt_replacements( # of the search text in the prompt, we instead perform string # replacement on the decoded token IDs, then encode them back. if all( - len(matches) >= len(mm_items[modality]) + len(matches) >= len(mm_data[modality]) for modality, matches in full_groupby_modality(token_matches) ): # yapf: disable token_ids = replace_token_matches( @@ -703,6 +648,15 @@ def _apply_prompt_replacements( placeholders = self._find_placeholders(matched_repls, token_ids) + # Sanity check + assert len(placeholders) == len(matched_repls), dict( + # Log this information for easier debugging + text=text, + token_ids=token_ids, + placeholders=placeholders, + matched_repls=matched_repls, + ) + return token_ids, text, placeholders def apply( @@ -724,7 +678,7 @@ def apply( 3. Extract information about the placeholder tokens from the processed token IDs. """ - tokenizer = self._get_tokenizer() + tokenizer = self.ctx.tokenizer hf_inputs = self._apply_hf_processor(prompt_text, mm_data, mm_processor_kwargs) @@ -763,59 +717,3 @@ def apply( mm_kwargs=mm_kwargs, mm_placeholders=mm_placeholders, ) - - @abstractmethod - def _get_dummy_mm_kwargs( - self, - mm_counts: Mapping[str, int], - ) -> MultiModalKwargs: - """ - Build the input that corresponds to `mm_max_tokens` in - :meth:`get_dummy_data`. - """ - raise NotImplementedError - - def get_dummy_data( - self, - seq_len: int, - mm_counts: Mapping[str, int], - mm_max_tokens: Mapping[str, int], - ) -> DummyData: - # Avoid circular import - from vllm.sequence import SequenceData - - tokenizer = self._get_tokenizer() - - mm_placeholders = dict[str, _PlaceholderInfo]() - offset = 0 - - for modality, max_tokens in mm_max_tokens.items(): - if max_tokens == 0: - continue - - metadata = self.metadata[modality] - repl = metadata.prompt_repls[0].bind(modality, tokenizer) - repl_token_ids = repl.repl_unit.token_ids - - placeholders = _PlaceholderInfo( - modality=modality, - start_idx=offset, - unit=repl_token_ids, - unit_count=max_tokens // len(repl_token_ids), - ) - - mm_placeholders[modality] = placeholders - offset += placeholders.length - - prompt_token_ids = flatten_2d_lists( - [p.unit * p.unit_count for p in mm_placeholders.values()]) - prompt_token_ids.extend([0] * (seq_len - len(prompt_token_ids))) - - return DummyData( - seq_data=SequenceData.from_seqs(prompt_token_ids), - multi_modal_data=self._get_dummy_mm_kwargs(mm_counts), - multi_modal_placeholders={ - modality: [p.to_range()] - for modality, p in mm_placeholders.items() - }, - ) diff --git a/vllm/multimodal/registry.py b/vllm/multimodal/registry.py index 6ab6c0fe2f12e..b73daee98bd80 100644 --- a/vllm/multimodal/registry.py +++ b/vllm/multimodal/registry.py @@ -15,7 +15,7 @@ from .base import MultiModalInputMapper, MultiModalPlugin, MultiModalTokensCalc from .image import ImagePlugin from .inputs import MultiModalDataDict, MultiModalKwargs, NestedTensors -from .processing import BaseMultiModalProcessor +from .processing import MultiModalProcessor from .video import VideoPlugin if TYPE_CHECKING: @@ -26,7 +26,7 @@ N = TypeVar("N", bound=Type[nn.Module]) MultiModalProcessorFactory: TypeAlias = Callable[[InputProcessingContext], - BaseMultiModalProcessor] + MultiModalProcessor] """ Constructs a :class:`MultiModalProcessor` instance from the context. 
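To make the MultiModalProcessorFactory alias above concrete, here is a hedged sketch of a conforming factory. The per-model metadata is a placeholder supplied by the caller, and the MultiModalProcessor constructor is assumed to take just ctx and metadata, as the processing.py hunk above suggests:

from vllm.inputs import InputProcessingContext
from vllm.multimodal.processing import MultiModalProcessor


def make_processor_factory(metadata):
    # Returns a callable matching MultiModalProcessorFactory; the registry
    # invokes it lazily once the model config and tokenizer are known.
    def build_processor(ctx: InputProcessingContext) -> MultiModalProcessor:
        return MultiModalProcessor(ctx=ctx, metadata=metadata)

    return build_processor

Such a factory is what register_processor (next hunk) records for a model class and what create_processor later invokes to build the processor for a given model and tokenizer.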
@@ -200,27 +200,6 @@ def register_max_image_tokens( """ return self.register_max_multimodal_tokens("image", max_mm_tokens) - def get_max_tokens_by_modality( - self, - model_config: "ModelConfig", - ) -> Mapping[str, int]: - """ - Get the maximum number of tokens from each modality - for profiling the memory usage of a model. - - See :meth:`MultiModalPlugin.get_max_multimodal_tokens` for more details. - - Note: - This should be called after :meth:`init_mm_limits_per_prompt`. - """ - limits_per_plugin = self._limits_by_model[model_config] - - return { - key: (limits_per_plugin[key] * - plugin.get_max_multimodal_tokens(model_config)) - for key, plugin in self._plugins.items() - } - def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: """ Get the maximum number of multi-modal tokens @@ -231,7 +210,11 @@ def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: Note: This should be called after :meth:`init_mm_limits_per_prompt`. """ - return sum(self.get_max_tokens_by_modality(model_config).values()) + limits_per_plugin = self._limits_by_model[model_config] + + return sum((limits_per_plugin[key] * + plugin.get_max_multimodal_tokens(model_config)) + for key, plugin in self._plugins.items()) def init_mm_limits_per_prompt( self, @@ -287,8 +270,7 @@ def register_processor( factory: MultiModalProcessorFactory, ): """ - Register a multi-modal processor to a model class. The processor - is constructed lazily, hence a factory method should be passed. + Register a multi-modal processor to a model class. When the model receives multi-modal data, the provided function is invoked to transform the data into a dictionary of model inputs. @@ -325,7 +307,7 @@ def create_processor( self, model_config: "ModelConfig", tokenizer: AnyTokenizer, - ) -> BaseMultiModalProcessor: + ) -> MultiModalProcessor: """ Create a multi-modal processor for a specific model and tokenizer. 
""" diff --git a/vllm/multimodal/utils.py b/vllm/multimodal/utils.py index c898ca4e6573e..d4333b7519b47 100644 --- a/vllm/multimodal/utils.py +++ b/vllm/multimodal/utils.py @@ -535,13 +535,11 @@ def repeat_and_pad_placeholder_tokens( return new_prompt, new_token_ids, placeholder_ranges -def consecutive_placeholder_ranges( - num_items: int, - item_size: int, - initial_offset: int = 0) -> List[PlaceholderRange]: +def consecutive_placeholder_ranges(num_items: int, + item_size: int) -> List[PlaceholderRange]: """Returns a list of consecutive PlaceholderRanges of a fixed size""" return [ - PlaceholderRange(offset=initial_offset + i * item_size, - length=item_size) for i in range(num_items) + PlaceholderRange(offset=i * item_size, length=item_size) + for i in range(num_items) ] diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index e5142b985d1f2..680ee74129739 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import psutil import torch @@ -37,10 +37,6 @@ def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: def get_device_total_memory(cls, device_id: int = 0) -> int: return psutil.virtual_memory().total - @classmethod - def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: - return False - @classmethod def inference_mode(cls): return torch.no_grad() diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index ae1fd6d5ce068..846a1869da228 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -4,8 +4,7 @@ import os from functools import lru_cache, wraps -from typing import (TYPE_CHECKING, Callable, List, Optional, Tuple, TypeVar, - Union) +from typing import TYPE_CHECKING, Callable, List, TypeVar import pynvml import torch @@ -13,7 +12,6 @@ # import custom ops, trigger op registration import vllm._C # noqa -import vllm.envs as envs from vllm.logger import init_logger from .interface import DeviceCapability, Platform, PlatformEnum @@ -79,9 +77,7 @@ class CudaPlatformBase(Platform): dispatch_key: str = "CUDA" @classmethod - def get_device_capability(cls, - device_id: int = 0 - ) -> Optional[DeviceCapability]: + def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: raise NotImplementedError @classmethod @@ -92,16 +88,6 @@ def get_device_name(cls, device_id: int = 0) -> str: def get_device_total_memory(cls, device_id: int = 0) -> int: raise NotImplementedError - @classmethod - def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: - if enforce_eager: - logger.warning( - "To see benefits of async output processing, enable CUDA " - "graph. 
Since, enforce-eager is enabled, async output " - "processor cannot be used") - return False - return True - @classmethod def is_full_nvlink(cls, device_ids: List[int]) -> bool: raise NotImplementedError @@ -114,28 +100,17 @@ def log_warnings(cls): def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config = vllm_config.parallel_config scheduler_config = vllm_config.scheduler_config - if parallel_config.worker_cls == "auto": if scheduler_config.is_multi_step: - if envs.VLLM_USE_V1: - raise NotImplementedError - else: - parallel_config.worker_cls = \ - "vllm.worker.multi_step_worker.MultiStepWorker" + parallel_config.worker_cls = \ + "vllm.worker.multi_step_worker.MultiStepWorker" elif vllm_config.speculative_config: - if envs.VLLM_USE_V1: - raise NotImplementedError - else: - parallel_config.worker_cls = \ - "vllm.spec_decode.spec_decode_worker.create_spec_worker" - parallel_config.sd_worker_cls = \ - "vllm.worker.worker.Worker" + parallel_config.worker_cls = \ + "vllm.spec_decode.spec_decode_worker.create_spec_worker" + parallel_config.sd_worker_cls = \ + "vllm.worker.worker.Worker" else: - if envs.VLLM_USE_V1: - parallel_config.worker_cls = \ - "vllm.v1.worker.gpu_worker.Worker" - else: - parallel_config.worker_cls = "vllm.worker.worker.Worker" + parallel_config.worker_cls = "vllm.worker.worker.Worker" # NVML utils @@ -147,29 +122,11 @@ class NvmlCudaPlatform(CudaPlatformBase): @classmethod @lru_cache(maxsize=8) @with_nvml_context - def get_device_capability(cls, - device_id: int = 0 - ) -> Optional[DeviceCapability]: - try: - physical_device_id = device_id_to_physical_device_id(device_id) - handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) - major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle) - return DeviceCapability(major=major, minor=minor) - except RuntimeError: - return None - - @classmethod - @lru_cache(maxsize=8) - @with_nvml_context - def has_device_capability( - cls, - capability: Union[Tuple[int, int], int], - device_id: int = 0, - ) -> bool: - try: - return super().has_device_capability(capability, device_id) - except RuntimeError: - return False + def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: + physical_device_id = device_id_to_physical_device_id(device_id) + handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) + major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle) + return DeviceCapability(major=major, minor=minor) @classmethod @lru_cache(maxsize=8) @@ -282,4 +239,4 @@ def is_full_nvlink(cls, physical_device_ids: List[int]) -> bool: if not isinstance(pynvml, _MockModule): CudaPlatform.log_warnings() except ModuleNotFoundError: - CudaPlatform.log_warnings() + CudaPlatform.log_warnings() \ No newline at end of file diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index ee83187fff797..2b1a1dad95753 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import torch @@ -20,10 +20,6 @@ class HpuPlatform(Platform): def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: return _Backend.HPU_ATTN - @classmethod - def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: - return True - @staticmethod def inference_mode(): return torch.no_grad() diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index db06d2c18e681..0be7df7941b8b 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -6,15 +6,11 @@ 
import numpy as np import torch -from vllm.logger import init_logger - if TYPE_CHECKING: from vllm.config import VllmConfig else: VllmConfig = None -logger = init_logger(__name__) - class _Backend(enum.Enum): FLASH_ATTN = enum.auto() @@ -151,13 +147,6 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: """Get the total memory of a device in bytes.""" raise NotImplementedError - @classmethod - def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: - """ - Check if the current platform supports async output. - """ - raise NotImplementedError - @classmethod def inference_mode(cls): """A device-specific wrapper of `torch.inference_mode`. diff --git a/vllm/platforms/neuron.py b/vllm/platforms/neuron.py index 1e5c4bddfa24f..87655ea198303 100644 --- a/vllm/platforms/neuron.py +++ b/vllm/platforms/neuron.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING from .interface import Platform, PlatformEnum @@ -18,10 +18,6 @@ class NeuronPlatform(Platform): def get_device_name(cls, device_id: int = 0) -> str: return "neuron" - @classmethod - def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: - return False - @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config = vllm_config.parallel_config diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index e0f8e8b4b49fe..29b61e955d9ab 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import torch @@ -37,10 +37,6 @@ def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: def get_device_name(self, device_id: int = 0) -> str: return "openvino" - @classmethod - def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: - return False - @classmethod def inference_mode(self): return torch.inference_mode(mode=True) diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index 0133f26a0b1bc..3c14fbc179f69 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -1,6 +1,6 @@ import os from functools import lru_cache -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import torch @@ -72,16 +72,6 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.cuda.get_device_properties(device_id) return device_props.total_memory - @classmethod - def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: - if enforce_eager: - logger.warning( - "To see benefits of async output processing, enable CUDA " - "graph. 
Since, enforce-eager is enabled, async output " - "processor cannot be used") - return False - return True - @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config = vllm_config.parallel_config @@ -93,8 +83,6 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: elif vllm_config.speculative_config: parallel_config.worker_cls = \ "vllm.spec_decode.spec_decode_worker.create_spec_worker" - parallel_config.sd_worker_cls = \ - "vllm.worker.worker.Worker" else: parallel_config.worker_cls = "vllm.worker.worker.Worker" diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 10d874349f36b..b138f7e1c54c5 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import torch @@ -35,10 +35,6 @@ def get_device_name(cls, device_id: int = 0) -> str: def get_device_total_memory(cls, device_id: int = 0) -> int: raise NotImplementedError - @classmethod - def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: - return True - @classmethod def inference_mode(cls): return torch.no_grad() diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 11dbd04d55671..9665786f4c499 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import torch @@ -41,10 +41,6 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.xpu.get_device_properties(device_id) return device_props.total_memory - @classmethod - def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: - return True - @staticmethod def inference_mode(): return torch.no_grad() diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 17f604ea0e202..ae6e5c0a3481f 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -57,7 +57,7 @@ def load_general_plugins(): discovered_plugins = entry_points(group='vllm.general_plugins') if len(discovered_plugins) == 0: - logger.debug("No plugins found.") + logger.info("No plugins found.") return logger.info("Available plugins:") for plugin in discovered_plugins: diff --git a/vllm/spec_decode/spec_decode_worker.py b/vllm/spec_decode/spec_decode_worker.py index 2a9542c664500..ad62d6fb4513b 100644 --- a/vllm/spec_decode/spec_decode_worker.py +++ b/vllm/spec_decode/spec_decode_worker.py @@ -56,10 +56,6 @@ def create_spec_worker(*args, **kwargs) -> "SpecDecodeWorker": speculative_config: SpeculativeConfig = vllm_config.speculative_config assert speculative_config is not None - if vllm_config.parallel_config.pipeline_parallel_size > 1: - raise NotImplementedError("Speculative decoding is currently " - "incompatible with pipeline parallelism") - draft_worker_kwargs = kwargs.copy() kwargs["model_runner_cls"] = TargetModelRunner diff --git a/vllm/utils.py b/vllm/utils.py index b052a972bec53..320b14653854b 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -10,7 +10,6 @@ import inspect import ipaddress import os -import signal import socket import subprocess import sys @@ -25,9 +24,9 @@ from collections.abc import Iterable, Mapping from functools import lru_cache, partial, wraps from platform import uname -from typing import (TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Callable, - Dict, Generic, Hashable, List, Literal, Optional, - OrderedDict, Set, Tuple, Type, TypeVar, Union, overload) +from typing import (Any, AsyncGenerator, Awaitable, Callable, Dict, Generic, + 
Hashable, List, Literal, Optional, OrderedDict, Set, Tuple, + Type, TypeVar, Union, overload) from uuid import uuid4 import numpy as np @@ -44,9 +43,6 @@ from vllm.logger import enable_trace_function_call, init_logger from vllm.platforms import current_platform -if TYPE_CHECKING: - from vllm.config import VllmConfig - logger = init_logger(__name__) # Exception strings for non-implemented encoder/decoder scenarios @@ -357,6 +353,17 @@ def random_uuid() -> str: return str(uuid.uuid4().hex) +@lru_cache(maxsize=None) +def get_vllm_instance_id() -> str: + """ + If the environment variable VLLM_INSTANCE_ID is set, return it. + Otherwise, return a random UUID. + Instance id represents an instance of the VLLM. All processes in the same + instance should have the same instance id. + """ + return envs.VLLM_INSTANCE_ID or f"vllm-instance-{random_uuid()}" + + @lru_cache(maxsize=None) def in_wsl() -> bool: # Reference: https://github.com/microsoft/WSL/issues/4071 @@ -1014,7 +1021,7 @@ def find_nccl_library() -> str: return so_file -def enable_trace_function_call_for_thread(vllm_config: "VllmConfig") -> None: +def enable_trace_function_call_for_thread() -> None: """Set up function tracing for the current thread, if enabled via the VLLM_TRACE_FUNCTION environment variable """ @@ -1026,8 +1033,7 @@ def enable_trace_function_call_for_thread(vllm_config: "VllmConfig") -> None: filename = (f"VLLM_TRACE_FUNCTION_for_process_{os.getpid()}" f"_thread_{threading.get_ident()}_" f"at_{datetime.datetime.now()}.log").replace(" ", "_") - log_path = os.path.join(tmp_dir, "vllm", - f"vllm-instance-{vllm_config.instance_id}", + log_path = os.path.join(tmp_dir, "vllm", get_vllm_instance_id(), filename) os.makedirs(os.path.dirname(log_path), exist_ok=True) enable_trace_function_call(log_path) @@ -1679,7 +1685,7 @@ def direct_register_custom_op( library object. If you want to bind the operator to a different library, make sure the library object is alive when the operator is used. """ - if is_in_doc_build() or not supports_custom_op(): + if is_in_doc_build(): return import torch.library if hasattr(torch.library, "infer_schema"): @@ -1703,28 +1709,3 @@ def resolve_obj_by_qualname(qualname: str) -> Any: module_name, obj_name = qualname.rsplit(".", 1) module = importlib.import_module(module_name) return getattr(module, obj_name) - - -def kill_process_tree(pid: int): - """ - Kills all descendant processes of the given pid by sending SIGKILL. - - Args: - pid (int): Process ID of the parent process - """ - try: - parent = psutil.Process(pid) - except psutil.NoSuchProcess: - return - - # Get all children recursively - children = parent.children(recursive=True) - - # Send SIGKILL to all children first - for child in children: - with contextlib.suppress(ProcessLookupError): - os.kill(child.pid, signal.SIGKILL) - - # Finally kill the parent - with contextlib.suppress(ProcessLookupError): - os.kill(pid, signal.SIGKILL) diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index 251a103e60f06..d37989055c2e5 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -138,25 +138,14 @@ def forward( # Profiling run. return output - # IMPORTANT! - # NOTE(woosuk): With piece-wise CUDA graphs, this method is executed in - # eager-mode PyTorch. Thus, we need to be careful about any CPU overhead - # in this method. For example, `view` and `slice` (or `[:n]`) operations - # are surprisingly slow even in the case they do not invoke any GPU ops. 
- # Minimize the PyTorch ops in this method as much as possible. - # Whenever making a change in this method, please benchmark the - # performance to make sure it does not introduce any overhead. - num_actual_tokens = attn_metadata.num_actual_tokens + # Reshape the input keys and values and store them in the cache. - # NOTE(woosuk): Here, key and value are padded while slot_mapping is - # not padded. However, we don't need to do key[:num_actual_tokens] and - # value[:num_actual_tokens] because the reshape_and_cache_flash op uses - # the slot_mapping's shape to determine the number of actual tokens. - key_cache, value_cache = kv_cache.unbind(0) + key_cache = kv_cache[0] + value_cache = kv_cache[1] torch.ops._C_cache_ops.reshape_and_cache_flash( - key, - value, + key[:num_actual_tokens], + value[:num_actual_tokens], key_cache, value_cache, attn_metadata.slot_mapping, diff --git a/vllm/v1/core/scheduler.py b/vllm/v1/core/scheduler.py index a3e85c20cc664..f1f26f4e8d443 100644 --- a/vllm/v1/core/scheduler.py +++ b/vllm/v1/core/scheduler.py @@ -5,8 +5,6 @@ from vllm.config import CacheConfig, LoRAConfig, SchedulerConfig from vllm.logger import init_logger -from vllm.multimodal import MultiModalKwargs -from vllm.multimodal.base import PlaceholderRange from vllm.sampling_params import SamplingParams from vllm.v1.core.encoder_cache_manager import EncoderCacheManager from vllm.v1.core.kv_cache_manager import KVCacheManager @@ -75,12 +73,12 @@ def __init__( # has the Transformer architecture (e.g., ViT). # FIXME(woosuk): Below are placeholder values. We need to calculate the # actual values from the configurations. - self.max_num_encoder_input_tokens = 16384 + self.max_num_encoder_input_tokens = 2048 # NOTE(woosuk): For the models without encoder (e.g., text-only models), # the encoder cache will not be initialized and used, regardless of # the cache size. This is because the memory space for the encoder cache # is preallocated in the profiling run. - self.encoder_cache_manager = EncoderCacheManager(cache_size=16384) + self.encoder_cache_manager = EncoderCacheManager(cache_size=2048) def schedule(self) -> "SchedulerOutput": # NOTE(woosuk) on the scheduling algorithm: @@ -385,7 +383,7 @@ def update_from_output( model_runner_output: "ModelRunnerOutput", ) -> List[EngineCoreOutput]: # NOTE(woosuk): This method doesn't consider speculative decoding. - sampled_token_ids = model_runner_output.sampled_token_ids + sampled_token_ids = model_runner_output.sampled_token_ids_cpu.tolist() num_scheduled_tokens = scheduler_output.num_scheduled_tokens new_running: List[Request] = [] engine_core_outputs: List[EngineCoreOutput] = [] diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index 26fd650aee4b7..4ef372fd8464b 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -20,7 +20,7 @@ from vllm.v1.engine.core_client import EngineCoreClient from vllm.v1.engine.detokenizer import Detokenizer from vllm.v1.engine.processor import Processor -from vllm.v1.executor.abstract import Executor +from vllm.v1.executor.gpu_executor import GPUExecutor logger = init_logger(__name__) @@ -30,7 +30,7 @@ class AsyncLLM(EngineClient): def __init__( self, vllm_config: VllmConfig, - executor_class: Type[Executor], + executor_class: Type[GPUExecutor], log_stats: bool, usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, stat_loggers: Optional[Dict[str, StatLoggerBase]] = None, @@ -65,12 +65,7 @@ def __init__( input_registry) # Detokenizer (converts EngineCoreOutputs --> RequestOutput). 
- self.detokenizer = Detokenizer( - tokenizer_name=vllm_config.model_config.tokenizer, - tokenizer_mode=vllm_config.model_config.tokenizer_mode, - trust_remote_code=vllm_config.model_config.trust_remote_code, - revision=vllm_config.model_config.tokenizer_revision, - ) + self.detokenizer = Detokenizer(vllm_config.model_config.tokenizer) # EngineCore (starts the engine in background process). self.engine_core = EngineCoreClient.make_client( @@ -119,24 +114,14 @@ def from_engine_args( def shutdown(self): """Shutdown, cleaning up the background proc and IPC.""" - if engine_core := getattr(self, "engine_core", None): - engine_core.shutdown() + self.engine_core.shutdown() if handler := getattr(self, "output_handler", None): handler.cancel() @classmethod def _get_executor_cls(cls, vllm_config: VllmConfig): - distributed_executor_backend = ( - vllm_config.parallel_config.distributed_executor_backend) - if distributed_executor_backend == "mp": - from vllm.v1.executor.multiproc_executor import MultiprocExecutor - executor_class = MultiprocExecutor - else: - assert (distributed_executor_backend is None) - from vllm.v1.executor.uniproc_executor import UniprocExecutor - executor_class = UniprocExecutor - return executor_class + return GPUExecutor async def add_request( self, diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index fdb241e6753fb..751eb3b40a68d 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -1,12 +1,12 @@ import multiprocessing import pickle import queue -import signal import threading import time +from contextlib import contextmanager from multiprocessing.process import BaseProcess from multiprocessing.sharedctypes import Synchronized -from typing import List, Tuple, Type, Union +from typing import Any, Iterator, List, Tuple, Type, Union import zmq import zmq.asyncio @@ -20,10 +20,9 @@ EngineCoreProfile, EngineCoreRequest, EngineCoreRequestType) from vllm.v1.engine.mm_input_mapper import MMInputMapper -from vllm.v1.executor.abstract import Executor +from vllm.v1.executor.gpu_executor import GPUExecutor from vllm.v1.request import Request, RequestStatus from vllm.v1.serial_utils import PickleEncoder -from vllm.v1.utils import make_zmq_socket from vllm.version import __version__ as VLLM_VERSION logger = init_logger(__name__) @@ -39,7 +38,7 @@ class EngineCore: def __init__( self, vllm_config: VllmConfig, - executor_class: Type[Executor], + executor_class: Type[GPUExecutor], usage_context: UsageContext, ): assert vllm_config.model_config.task != "embedding" @@ -81,7 +80,7 @@ def _initialize_kv_caches(self, num_gpu_blocks = num_gpu_blocks_override num_cpu_blocks = 0 - self.model_executor.initialize(num_gpu_blocks) + self.model_executor.initialize_cache(num_gpu_blocks) elapsed = time.time() - start logger.info(("init engine (profile, create kv cache, " "warmup model) took %.2f seconds"), elapsed) @@ -113,11 +112,8 @@ def step(self) -> List[EngineCoreOutput]: scheduler_output, output) return engine_core_outputs - def shutdown(self): - self.model_executor.shutdown() - def profile(self, is_start=True): - self.model_executor.profile(is_start) + self.model_executor.worker.profile(is_start) class EngineCoreProc(EngineCore): @@ -128,7 +124,7 @@ class EngineCoreProc(EngineCore): def __init__( self, vllm_config: VllmConfig, - executor_class: Type[Executor], + executor_class: Type[GPUExecutor], usage_context: UsageContext, input_path: str, output_path: str, @@ -155,9 +151,32 @@ def __init__( daemon=True).start() # Send Readiness signal to EngineClient. 
- with make_zmq_socket(ready_path, zmq.constants.PUSH) as ready_socket: + with self.make_socket(ready_path, zmq.constants.PUSH) as ready_socket: ready_socket.send_string(EngineCoreProc.READY_STR) + @contextmanager + def make_socket(self, path: str, type: Any) -> Iterator[zmq.Socket]: + """Context manager for use """ + + ctx = zmq.Context() + try: + socket = ctx.socket(type) + + if type == zmq.constants.PULL: + socket.connect(path) + elif type == zmq.constants.PUSH: + socket.bind(path) + else: + raise ValueError(f"Unknown Socket Type: {type}") + + yield socket + + except KeyboardInterrupt: + logger.debug("EngineCore had Keyboard Interrupt.") + + finally: + ctx.destroy(linger=0) + @staticmethod def wait_for_startup( proc: BaseProcess, @@ -190,7 +209,7 @@ def wait_for_startup( @staticmethod def make_engine_core_process( vllm_config: VllmConfig, - executor_class: Type[Executor], + executor_class: Type[GPUExecutor], usage_context: UsageContext, input_path: str, output_path: str, @@ -225,38 +244,17 @@ def make_engine_core_process( def run_engine_core(*args, **kwargs): """Launch EngineCore busy loop in background process.""" - # Signal handler used for graceful termination. - # SystemExit exception is only raised once to allow this and worker - # processes to terminate without error - shutdown_requested = False - - def signal_handler(signum, frame): - nonlocal shutdown_requested - if not shutdown_requested: - shutdown_requested = True - raise SystemExit() - - # Either SIGTERM or SIGINT will terminate the engine_core - signal.signal(signal.SIGTERM, signal_handler) - signal.signal(signal.SIGINT, signal_handler) - - engine_core = None try: engine_core = EngineCoreProc(*args, **kwargs) engine_core.run_busy_loop() - except SystemExit: + except KeyboardInterrupt: logger.debug("EngineCore interrupted.") except BaseException as e: logger.exception(e) raise e - finally: - if engine_core is not None: - engine_core.shutdown() - engine_core = None - def run_busy_loop(self): """Core busy loop of the EngineCore.""" @@ -274,8 +272,6 @@ def run_busy_loop(self): logger.debug("EngineCore busy loop waiting.") if self.should_shutdown: return - except BaseException: - raise # 2) Handle any new client requests (Abort or Add). while not self.input_queue.empty(): @@ -325,7 +321,7 @@ def process_input_socket(self, input_path: str): decoder_add_req = PickleEncoder() decoder_abort_req = PickleEncoder() - with make_zmq_socket(input_path, zmq.constants.PULL) as socket: + with self.make_socket(input_path, zmq.constants.PULL) as socket: while True: # (RequestType, RequestData) type_frame, data_frame = socket.recv_multipart(copy=False) @@ -353,7 +349,7 @@ def process_output_socket(self, output_path: str): # Reuse send buffer. 
buffer = bytearray() - with make_zmq_socket(output_path, zmq.constants.PUSH) as socket: + with self.make_socket(output_path, zmq.constants.PUSH) as socket: while True: engine_core_outputs = self.output_queue.get() outputs = EngineCoreOutputs(outputs=engine_core_outputs) diff --git a/vllm/v1/engine/core_client.py b/vllm/v1/engine/core_client.py index ee89cece73141..835963f7ee86c 100644 --- a/vllm/v1/engine/core_client.py +++ b/vllm/v1/engine/core_client.py @@ -1,4 +1,5 @@ import multiprocessing +import time from typing import List, Union import msgspec @@ -6,7 +7,7 @@ import zmq.asyncio from vllm.logger import init_logger -from vllm.utils import get_open_zmq_ipc_path, kill_process_tree +from vllm.utils import get_open_zmq_ipc_path from vllm.v1.engine import (EngineCoreOutput, EngineCoreOutputs, EngineCoreProfile, EngineCoreRequest, EngineCoreRequestType) @@ -98,12 +99,6 @@ def add_request(self, request: EngineCoreRequest) -> None: def abort_requests(self, request_ids: List[str]) -> None: self.engine_core.abort_requests(request_ids) - def shutdown(self): - self.engine_core.shutdown() - - def __del__(self): - self.shutdown() - async def profile(self, is_start=True) -> None: self.engine_core.profile(is_start) @@ -168,10 +163,10 @@ def shutdown(self): # Shutdown the process if needed. if hasattr(self, "proc") and self.proc.is_alive(): self.proc.terminate() - self.proc.join(5) + time.sleep(5) if self.proc.is_alive(): - kill_process_tree(self.proc.pid) + self.proc.kill() def __del__(self): self.shutdown() diff --git a/vllm/v1/engine/llm_engine.py b/vllm/v1/engine/llm_engine.py index 1b3a9f12d009e..312c0242a45dd 100644 --- a/vllm/v1/engine/llm_engine.py +++ b/vllm/v1/engine/llm_engine.py @@ -1,7 +1,5 @@ from typing import Dict, List, Mapping, Optional, Type, Union -from typing_extensions import TypeVar - from vllm.config import VllmConfig from vllm.engine.arg_utils import EngineArgs from vllm.engine.metrics_types import StatLoggerBase @@ -14,18 +12,15 @@ from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams -from vllm.transformers_utils.tokenizer_group import ( - BaseTokenizerGroup, init_tokenizer_from_configs) +from vllm.transformers_utils.tokenizer_group import init_tokenizer_from_configs from vllm.usage.usage_lib import UsageContext from vllm.v1.engine.core_client import EngineCoreClient from vllm.v1.engine.detokenizer import Detokenizer from vllm.v1.engine.processor import Processor -from vllm.v1.executor.abstract import Executor +from vllm.v1.executor.gpu_executor import GPUExecutor logger = init_logger(__name__) -_G = TypeVar("_G", bound=BaseTokenizerGroup, default=BaseTokenizerGroup) - class LLMEngine: """Legacy LLMEngine for backwards compatibility.""" @@ -33,7 +28,7 @@ class LLMEngine: def __init__( self, vllm_config: VllmConfig, - executor_class: Type[Executor], + executor_class: Type[GPUExecutor], log_stats: bool, usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, stat_loggers: Optional[Dict[str, StatLoggerBase]] = None, @@ -104,17 +99,10 @@ def from_engine_args( @classmethod def _get_executor_cls(cls, vllm_config: VllmConfig): - distributed_executor_backend = ( - vllm_config.parallel_config.distributed_executor_backend) - if distributed_executor_backend == "mp": - from vllm.v1.executor.multiproc_executor import MultiprocExecutor - executor_class = MultiprocExecutor - else: - assert (distributed_executor_backend is None) - from vllm.v1.executor.uniproc_executor import 
UniprocExecutor - executor_class = UniprocExecutor - - return executor_class + return GPUExecutor + + def stop_remote_worker_execution_loop(self) -> None: + raise NotImplementedError("TP not implemented yet.") def get_num_unfinished_requests(self) -> int: return self.detokenizer.get_num_unfinished_requests() @@ -181,18 +169,5 @@ def start_profile(self): def stop_profile(self): self.engine_core.profile(False) - def get_tokenizer_group( - self, - group_type: Type[_G] = BaseTokenizerGroup, - ) -> _G: - tokenizer_group = self.tokenizer - - if tokenizer_group is None: - raise ValueError("Unable to get tokenizer because " - "skip_tokenizer_init is True") - if not isinstance(tokenizer_group, group_type): - raise TypeError("Invalid type of tokenizer group. " - f"Expected type: {group_type}, but " - f"found type: {type(tokenizer_group)}") - - return tokenizer_group + def get_tokenizer_group(self, group_type): + pass diff --git a/vllm/v1/engine/mm_input_mapper.py b/vllm/v1/engine/mm_input_mapper.py index 7ad6882b04520..594c973678235 100644 --- a/vllm/v1/engine/mm_input_mapper.py +++ b/vllm/v1/engine/mm_input_mapper.py @@ -12,7 +12,6 @@ def __init__( model_config: ModelConfig, mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY, ): - self.model_config = model_config self.mm_registry = mm_registry self.multi_modal_input_mapper = mm_registry.create_input_mapper( model_config) @@ -33,7 +32,7 @@ def process_inputs( num_images = len(image_inputs) for i in range(num_images): mm_input = self.multi_modal_input_mapper( - {"image": image_inputs[i]}, + {"image": [image_inputs[i]]}, mm_processor_kwargs=mm_processor_kwargs, ) mm_inputs.append(mm_input) diff --git a/vllm/v1/engine/processor.py b/vllm/v1/engine/processor.py index 120fc64969552..7a1ea2530abda 100644 --- a/vllm/v1/engine/processor.py +++ b/vllm/v1/engine/processor.py @@ -7,8 +7,7 @@ from vllm.inputs.parse import is_encoder_decoder_inputs from vllm.inputs.preprocess import InputPreprocessor from vllm.lora.request import LoRARequest -from vllm.multimodal import (MULTIMODAL_REGISTRY, MultiModalKwargs, - MultiModalRegistry) +from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams @@ -102,15 +101,10 @@ def process_inputs( self.generation_config_fields, eos_token_id) # Preprocess multi-modal data - if len(decoder_inputs.multi_modal_data) == 0: - mm_inputs = None - elif isinstance(decoder_inputs.multi_modal_data, MultiModalKwargs): - mm_inputs = [decoder_inputs.multi_modal_data] - else: - mm_inputs = self.mm_input_mapper.process_inputs( - decoder_inputs.multi_modal_data, - decoder_inputs.mm_processor_kwargs, - ) + mm_inputs = self.mm_input_mapper.process_inputs( + decoder_inputs.multi_modal_data, + decoder_inputs.mm_processor_kwargs) if len( + decoder_inputs.multi_modal_data) > 0 else None # Make Request for Detokenizer. 
detokenizer_request = DetokenizerRequest( diff --git a/vllm/v1/executor/abstract.py b/vllm/v1/executor/abstract.py deleted file mode 100644 index 9cd267581ad18..0000000000000 --- a/vllm/v1/executor/abstract.py +++ /dev/null @@ -1,48 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Dict, Optional, Tuple - -from vllm.config import VllmConfig -from vllm.v1.outputs import ModelRunnerOutput - - -class Executor(ABC): - """Abstract class for executors.""" - - @abstractmethod - def __init__(self, vllm_config: VllmConfig) -> None: - raise NotImplementedError - - @abstractmethod - def initialize(self, num_gpu_blocks: int) -> None: - raise NotImplementedError - - @abstractmethod - def determine_num_available_blocks(self) -> Tuple[int, int]: - raise NotImplementedError - - @abstractmethod - def execute_model( - self, - scheduler_output, - ) -> ModelRunnerOutput: - raise NotImplementedError - - @abstractmethod - def profile(self, is_start=True): - raise NotImplementedError - - @abstractmethod - def shutdown(self): - pass - - @abstractmethod - def check_health(self) -> None: - raise NotImplementedError - - @abstractmethod - def collective_rpc(self, - method: str, - timeout: Optional[float] = None, - args: Tuple = (), - kwargs: Optional[Dict] = None) -> []: - raise NotImplementedError diff --git a/vllm/v1/executor/uniproc_executor.py b/vllm/v1/executor/gpu_executor.py similarity index 90% rename from vllm/v1/executor/uniproc_executor.py rename to vllm/v1/executor/gpu_executor.py index 9b1d9a40950c6..f71fa16b16e27 100644 --- a/vllm/v1/executor/uniproc_executor.py +++ b/vllm/v1/executor/gpu_executor.py @@ -10,7 +10,7 @@ logger = init_logger(__name__) -class UniprocExecutor: +class GPUExecutor: def __init__(self, vllm_config: VllmConfig) -> None: self.vllm_config = vllm_config @@ -54,7 +54,7 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: """ return self.worker.determine_num_available_blocks() - def initialize(self, num_gpu_blocks: int) -> None: + def initialize_cache(self, num_gpu_blocks: int) -> None: """Initialize the KV cache by invoking the underlying worker. """ # NOTE: This is logged in the executor because there can be >1 worker @@ -71,13 +71,7 @@ def execute_model( output = self.worker.execute_model(scheduler_output) return output - def profile(self, is_start: bool = True): - self.worker.profile(is_start) - - def shutdown(self): - self.worker = None - def check_health(self) -> None: - # UniprocExecutor will always be healthy as long as + # GPUExecutor will always be healthy as long as # it's running. 
return diff --git a/vllm/v1/executor/multiproc_executor.py b/vllm/v1/executor/multiproc_executor.py deleted file mode 100644 index f8f3d583618cf..0000000000000 --- a/vllm/v1/executor/multiproc_executor.py +++ /dev/null @@ -1,375 +0,0 @@ -import atexit -import os -import pickle -import signal -import sys -import time -from dataclasses import dataclass -from enum import Enum, auto -from multiprocessing.process import BaseProcess -from typing import Dict, List, Optional, Tuple - -import zmq - -from vllm.config import VllmConfig -from vllm.distributed import (destroy_distributed_environment, - destroy_model_parallel) -from vllm.distributed.device_communicators.shm_broadcast import (Handle, - MessageQueue) -from vllm.executor.multiproc_worker_utils import ( - _add_prefix, get_mp_context, set_multiprocessing_worker_envs) -from vllm.logger import init_logger -from vllm.utils import (get_distributed_init_method, get_open_port, - get_open_zmq_ipc_path) -from vllm.v1.outputs import ModelRunnerOutput -from vllm.v1.utils import make_zmq_socket -from vllm.worker.worker_base import WorkerWrapperBase - -logger = init_logger(__name__) - -POLLING_TIMEOUT_MS = 5000 -POLLING_TIMEOUT_S = POLLING_TIMEOUT_MS // 1000 - - -class MultiprocExecutor: - - def __init__(self, vllm_config: VllmConfig) -> None: - # Call self.shutdown at exit to clean up - # and ensure workers will be terminated. - atexit.register(self.shutdown) - - self.vllm_config = vllm_config - self.parallel_config = vllm_config.parallel_config - - self.world_size = self.parallel_config.world_size - tensor_parallel_size = self.parallel_config.tensor_parallel_size - assert self.world_size == tensor_parallel_size, ( - f"world_size ({self.world_size}) must be equal to the " - f"tensor_parallel_size ({tensor_parallel_size}). " - f"Pipeline parallelism is not yet implemented in v1") - - # Set multiprocessing envs that are common to V0 and V1 - set_multiprocessing_worker_envs(self.parallel_config) - - # Multiprocessing-based executor does not support multi-node setting. - # Since it only works for single node, we can use the loopback address - # 127.0.0.1 for communication. - distributed_init_method = get_distributed_init_method( - "127.0.0.1", get_open_port()) - - # Initialize worker and set up message queues for SchedulerOutputs - # and ModelRunnerOutputs - self.rpc_broadcast_mq = MessageQueue(self.world_size, self.world_size) - scheduler_output_handle = self.rpc_broadcast_mq.export_handle() - - # Create workers - self.workers: List[WorkerProcHandle] = [] - for rank in range(self.world_size): - worker = WorkerProc.make_worker_process(vllm_config, rank, rank, - distributed_init_method, - scheduler_output_handle) - self.workers.append(worker) - - # Ensure message queues are ready. Will deadlock if re-ordered - # Must be kept consistent with the WorkerProc - self.rpc_broadcast_mq.wait_until_ready() - for w in self.workers: - w.worker_response_mq.wait_until_ready() - - def initialize(self, num_gpu_blocks: int) -> None: - """ - Initialize the KV caches and begin the model execution loop of the - underlying workers. - """ - self.collective_rpc("initialize_cache", args=(num_gpu_blocks, )) - self.collective_rpc("compile_or_warm_up_model") - - def determine_num_available_blocks(self) -> Tuple[int, int]: - """ - Determine the number of available KV blocks by invoking the - underlying worker. 
-        """
-        num_blocks = self.collective_rpc("determine_num_available_blocks")
-
-        # Since we use a shared centralized controller, we take the minimum
-        # number of blocks across all workers to make sure all the memory
-        # operators can be applied to all workers.
-        num_gpu_blocks = min(b[0] for b in num_blocks)
-        num_cpu_blocks = min(b[1] for b in num_blocks)
-
-        return num_gpu_blocks, num_cpu_blocks
-
-    def collective_rpc(self,
-                       method: str,
-                       timeout: Optional[float] = None,
-                       args: Tuple = (),
-                       kwargs: Optional[Dict] = None) -> []:
-        """
-        Execute an RPC call on workers.
-
-        Args:
-            method: Name of the worker method to execute
-            timeout: Maximum time in seconds to wait for execution. Raises a
-                TimeoutError on timeout. None means wait indefinitely.
-            args: Positional arguments to pass to the worker method
-            kwargs: Keyword arguments to pass to the worker method
-
-        Returns:
-            List of results from each worker
-        """
-        start_time = time.monotonic()
-        kwargs = kwargs or {}
-
-        try:
-            self.rpc_broadcast_mq.enqueue((method, args, kwargs))
-
-            responses = [None] * self.world_size
-            for w in self.workers:
-                dequeue_timeout = timeout - (time.monotonic() - start_time
-                                             ) if timeout is not None else None
-                status, result = w.worker_response_mq.dequeue(
-                    timeout=dequeue_timeout)
-
-                if status != WorkerProc.ResponseStatus.SUCCESS:
-                    if isinstance(result, Exception):
-                        raise result
-                    else:
-                        raise RuntimeError("Worker failed")
-
-                responses[w.rank] = result
-
-            return responses
-        except TimeoutError as e:
-            raise TimeoutError(f"RPC call to {method} timed out.") from e
-        except Exception as e:
-            # Re-raise any other exceptions
-            raise e
-
-    def execute_model(
-        self,
-        scheduler_output,
-    ) -> ModelRunnerOutput:
-        model_output = self.collective_rpc("execute_model",
-                                           args=(scheduler_output, ))[0]
-        return model_output
-
-    def profile(self, is_start=True):
-        self.collective_rpc("profile", args=(is_start, ))
-        return
-
-    def _ensure_worker_termination(self):
-        """Ensure that all worker processes are terminated. Assumes workers have
-        received termination requests.
Waits for processing, then sends - termination and kill signals if needed.""" - - def wait_for_termination(procs, timeout): - start_time = time.time() - while time.time() - start_time < timeout: - if all(not proc.is_alive() for proc in procs): - return True - time.sleep(0.1) - return False - - # Send SIGTERM if still running - active_procs = [w.proc for w in self.workers if w.proc.is_alive()] - self.workers = None - for p in active_procs: - p.terminate() - if wait_for_termination(active_procs, 4): - return - - # Send SIGKILL if still running - active_procs = [p for p in active_procs if p.is_alive()] - for p in active_procs: - p.kill() - - def shutdown(self): - """Properly shut down the executor and its workers""" - if (hasattr(self, 'workers') and self.workers is not None): - for w in self.workers: #TODO: not sure if needed - w.worker_response_mq = None - self._ensure_worker_termination() - - self.rpc_broadcast_mq = None - - def check_health(self) -> None: - self.collective_rpc("check_health", timeout=10) - return - - -@dataclass -class WorkerProcHandle: - proc: BaseProcess - rank: int - ready_path: str - worker_response_mq: MessageQueue # The worker process writes to this MQ - - -class WorkerProc: - """Wrapper that runs one Worker in a separate process.""" - - READY_STR = "READY" - - def __init__( - self, - vllm_config: VllmConfig, - local_rank: int, - rank: int, - distributed_init_method: str, - input_shm_handle: Handle, - ready_path: str, - ): - self.rank = rank - wrapper = WorkerWrapperBase(vllm_config=vllm_config) - wrapper.init_worker(vllm_config, local_rank, rank, - distributed_init_method) - self.worker = wrapper.worker - - pid = os.getpid() - _add_prefix(sys.stdout, f"VllmWorker rank={rank}", pid) - _add_prefix(sys.stderr, f"VllmWorker rank={rank}", pid) - - # Initialize MessageQueue for receiving SchedulerOutput - self.rpc_broadcast_mq = MessageQueue.create_from_handle( - input_shm_handle, self.worker.rank) - - # Initializes a message queue for sending the model output - self.worker_response_mq = MessageQueue(1, 1) - worker_response_mq_handle = self.worker_response_mq.export_handle() - - # Send Readiness signal to EngineCore process. - with make_zmq_socket(ready_path, zmq.constants.PUSH) as ready_socket: - payload = pickle.dumps(worker_response_mq_handle, - protocol=pickle.HIGHEST_PROTOCOL) - ready_socket.send_string(WorkerProc.READY_STR) - ready_socket.send(payload) - - self.worker.initialize() - self.worker.load_model() - - @staticmethod - def make_worker_process( - vllm_config: VllmConfig, - local_rank: int, - rank: int, - distributed_init_method: str, - input_shm_handle, # Receive SchedulerOutput - ) -> WorkerProcHandle: - context = get_mp_context() - - # ZMQ path for worker to send ready message and shm_broadcast handle - # back to core process. - ready_path = get_open_zmq_ipc_path() - - process_kwargs = { - "vllm_config": vllm_config, - "local_rank": local_rank, - "rank": rank, - "distributed_init_method": distributed_init_method, - "input_shm_handle": input_shm_handle, - "ready_path": ready_path, - } - # Run EngineCore busy loop in background process. 
- proc = context.Process(target=WorkerProc.worker_main, - kwargs=process_kwargs, - daemon=True) - proc.start() - - # Wait for startup - worker_response_mq_handle = WorkerProc.wait_for_startup( - proc, ready_path) - - worker_response_mq = MessageQueue.create_from_handle( - worker_response_mq_handle, 0) - - return WorkerProcHandle(proc, rank, ready_path, worker_response_mq) - - def shutdown(self): - self.rpc_broadcast_mq = None - self.worker_response_mq = None - destroy_model_parallel() - destroy_distributed_environment() - - @staticmethod - def worker_main(*args, **kwargs): - """ Worker initialization and execution loops. - This runs a background process """ - - # Signal handler used for graceful termination. - # SystemExit exception is only raised once to allow this and worker - # processes to terminate without error - shutdown_requested = False - - def signal_handler(signum, frame): - nonlocal shutdown_requested - if not shutdown_requested: - shutdown_requested = True - raise SystemExit() - - # Either SIGTERM or SIGINT will terminate the worker - signal.signal(signal.SIGTERM, signal_handler) - signal.signal(signal.SIGINT, signal_handler) - - worker = None - try: - worker = WorkerProc(*args, **kwargs) - - # Ensure message queues are ready. Will deadlock if re-ordered. - # Must be kept consistent with the Executor - worker.rpc_broadcast_mq.wait_until_ready() - worker.worker_response_mq.wait_until_ready() - - worker.worker_busy_loop() - - except SystemExit: - logger.debug("Worker interrupted.") - - except BaseException as e: - logger.exception(e) - raise - - finally: - # Clean up once worker exits busy loop - if worker is not None: - worker.shutdown() - worker = None - - @staticmethod - def wait_for_startup( - proc: BaseProcess, - ready_path: str, - ) -> Optional[Handle]: - """Wait until the Worker is ready.""" - with make_zmq_socket(ready_path, zmq.constants.PULL) as socket: - - # Wait for Worker to send READY. - while socket.poll(timeout=POLLING_TIMEOUT_MS) == 0: - logger.debug("Waiting for WorkerProc to startup.") - - if not proc.is_alive(): - raise RuntimeError("WorkerProc failed to start.") - - message = socket.recv_string() - assert message == WorkerProc.READY_STR - handle_frame = socket.recv(copy=False) - handle = pickle.loads(handle_frame.buffer) - return handle - - class ResponseStatus(Enum): - SUCCESS = auto() - FAILURE = auto() - - def worker_busy_loop(self): - """Main busy loop for Multiprocessing Workers""" - while True: - method, args, kwargs = self.rpc_broadcast_mq.dequeue() - - try: - output = getattr(self.worker, method)(*args, **kwargs) - except BaseException as e: - self.worker_response_mq.enqueue( - (WorkerProc.ResponseStatus.FAILURE, e)) - continue - - self.worker_response_mq.enqueue( - (WorkerProc.ResponseStatus.SUCCESS, output)) diff --git a/vllm/v1/outputs.py b/vllm/v1/outputs.py index acc3a944e21b9..8574987728844 100644 --- a/vllm/v1/outputs.py +++ b/vllm/v1/outputs.py @@ -8,7 +8,7 @@ class SamplerOutput: # [num_reqs] - sampled_token_ids: List[int] + sampled_token_ids: torch.Tensor # [num_reqs, max_num_logprobs + 1] logprob_token_ids: Optional[torch.Tensor] @@ -20,8 +20,6 @@ class SamplerOutput: prompt_logprobs: Optional[torch.Tensor] -# ModelRunnerOutput is serialized and sent to the scheduler process. -# This is expensive for torch.Tensor so prefer to use List instead. 
@dataclass class ModelRunnerOutput: @@ -31,7 +29,7 @@ class ModelRunnerOutput: req_id_to_index: Dict[str, int] # [num_reqs] - sampled_token_ids: List[int] + sampled_token_ids_cpu: torch.Tensor # [num_reqs, max_num_logprobs + 1] logprob_token_ids_cpu: Optional[torch.Tensor] diff --git a/vllm/v1/sample/sampler.py b/vllm/v1/sample/sampler.py index d1a755be01ff7..927f274541c4d 100644 --- a/vllm/v1/sample/sampler.py +++ b/vllm/v1/sample/sampler.py @@ -37,9 +37,8 @@ def forward( topk_logprobs = None topk_indices = None - # NOTE: CPU-GPU synchronization happens here. sampler_output = SamplerOutput( - sampled_token_ids=sampled.tolist(), + sampled_token_ids=sampled, logprob_token_ids=topk_indices, logprobs=topk_logprobs, prompt_logprob_token_ids=None, diff --git a/vllm/v1/utils.py b/vllm/v1/utils.py index 6e7a7d4fe12cd..4b26749712e32 100644 --- a/vllm/v1/utils.py +++ b/vllm/v1/utils.py @@ -1,11 +1,4 @@ -from contextlib import contextmanager -from typing import Any, Generic, Iterator, List, TypeVar, overload - -import zmq - -from vllm.logger import init_logger - -logger = init_logger(__name__) +from typing import Generic, List, TypeVar, overload T = TypeVar("T") @@ -69,27 +62,3 @@ def __contains__(self, item): def __len__(self): return len(self._x) - - -@contextmanager -def make_zmq_socket(path: str, type: Any) -> Iterator[zmq.Socket]: - """Context manager for a ZMQ socket""" - - ctx = zmq.Context() - try: - socket = ctx.socket(type) - - if type == zmq.constants.PULL: - socket.connect(path) - elif type == zmq.constants.PUSH: - socket.bind(path) - else: - raise ValueError(f"Unknown Socket Type: {type}") - - yield socket - - except KeyboardInterrupt: - logger.debug("Worker had Keyboard Interrupt.") - - finally: - ctx.destroy(linger=0) diff --git a/vllm/v1/worker/gpu_input_batch.py b/vllm/v1/worker/gpu_input_batch.py deleted file mode 100644 index 25d95ac6e26af..0000000000000 --- a/vllm/v1/worker/gpu_input_batch.py +++ /dev/null @@ -1,285 +0,0 @@ -# Datastructures defining an input batch - -from dataclasses import dataclass -from typing import TYPE_CHECKING, Dict, List, Optional, Set - -import numpy as np -import torch - -from vllm.multimodal import MultiModalKwargs -from vllm.sampling_params import SamplingParams, SamplingType -from vllm.v1.sample.metadata import SamplingMetadata - -if TYPE_CHECKING: - from vllm.multimodal.inputs import PlaceholderRange - - -@dataclass -class CachedRequestState: - - req_id: str - prompt_token_ids: List[int] - prompt: Optional[str] - mm_inputs: List[MultiModalKwargs] - mm_positions: List["PlaceholderRange"] - sampling_params: SamplingParams - generator: Optional[torch.Generator] - - block_ids: List[int] - num_computed_tokens: int - output_token_ids: List[int] - - @property - def num_tokens(self) -> int: - return len(self.prompt_token_ids) + len(self.output_token_ids) - - -class InputBatch: - - def __init__( - self, - max_num_reqs: int, - max_model_len: int, - max_num_blocks_per_req: int, - device: torch.device, - pin_memory: bool, - ): - self.max_num_reqs = max_num_reqs - self.max_model_len = max_model_len - self.max_num_blocks_per_req = max_num_blocks_per_req - self.device = device - self.pin_memory = pin_memory - - self.req_ids: List[Optional[str]] = [None] * max_num_reqs - self.req_id_to_index: Dict[str, int] = {} - - self.token_ids_cpu = np.empty((max_num_reqs, max_model_len), - dtype=np.int32) - self.num_computed_tokens_cpu = np.empty(max_num_reqs, dtype=np.int32) - - # Attention-related. 
- self.block_table = torch.zeros((max_num_reqs, max_num_blocks_per_req), - device=self.device, - dtype=torch.int32) - self.block_table_cpu_tensor = torch.zeros( - (max_num_reqs, max_num_blocks_per_req), - device="cpu", - dtype=torch.int32, - pin_memory=pin_memory, - ) - self.block_table_cpu = self.block_table_cpu_tensor.numpy() - - # Sampling-related. - self.temperature = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device=device) - self.temperature_cpu_tensor = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device="cpu", - pin_memory=pin_memory) - self.temperature_cpu = self.temperature_cpu_tensor.numpy() - self.greedy_reqs: Set[str] = set() - self.random_reqs: Set[str] = set() - - self.top_p = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device=device) - self.top_p_cpu_tensor = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device="cpu", - pin_memory=pin_memory) - self.top_p_cpu = self.top_p_cpu_tensor.numpy() - self.top_p_reqs: Set[str] = set() - - self.top_k = torch.empty((max_num_reqs, ), - dtype=torch.int32, - device=device) - self.top_k_cpu_tensor = torch.empty((max_num_reqs, ), - dtype=torch.int32, - device="cpu", - pin_memory=pin_memory) - self.top_k_cpu = self.top_k_cpu_tensor.numpy() - self.top_k_reqs: Set[str] = set() - - # req_index -> generator - # NOTE(woosuk): The indices of the requests that do not have their own - # generator should not be included in the dictionary. - self.generators: Dict[int, torch.Generator] = {} - - self.num_logprobs: Dict[str, int] = {} - self.prompt_logprob_reqs: Set[str] = set() - - def add_request( - self, - request: "CachedRequestState", - req_index: Optional[int] = None, - ) -> None: - if req_index is None: - req_index = self.num_reqs - assert req_index < self.max_num_reqs - - req_id = request.req_id - self.req_ids[req_index] = req_id - self.req_id_to_index[req_id] = req_index - - # Copy the prompt token ids and output token ids. - num_prompt_tokens = len(request.prompt_token_ids) - self.token_ids_cpu[ - req_index, :num_prompt_tokens] = request.prompt_token_ids - start_idx = num_prompt_tokens - end_idx = start_idx + len(request.output_token_ids) - self.token_ids_cpu[req_index, - start_idx:end_idx] = request.output_token_ids - - self.num_computed_tokens_cpu[req_index] = request.num_computed_tokens - num_blocks = len(request.block_ids) - self.block_table_cpu[req_index, :num_blocks] = request.block_ids - - sampling_params = request.sampling_params - self.temperature_cpu[req_index] = sampling_params.temperature - if sampling_params.sampling_type == SamplingType.GREEDY: - self.greedy_reqs.add(req_id) - else: - self.random_reqs.add(req_id) - - self.top_p_cpu[req_index] = sampling_params.top_p - if sampling_params.top_p < 1: - self.top_p_reqs.add(req_id) - self.top_k_cpu[req_index] = sampling_params.top_k - if sampling_params.top_k > 0: - self.top_k_reqs.add(req_id) - - # NOTE(woosuk): self.generators should not include the requests that - # do not have their own generator. 
- if request.generator is not None: - self.generators[req_index] = request.generator - - num_logprobs = sampling_params.logprobs - if num_logprobs is not None and num_logprobs > 0: - self.num_logprobs[req_id] = num_logprobs - if sampling_params.prompt_logprobs: - self.prompt_logprob_reqs.add(req_id) - - def remove_request(self, req_id: str) -> Optional[int]: - req_index = self.req_id_to_index.pop(req_id, None) - if req_index is None: - return None - self.req_ids[req_index] = None - - self.greedy_reqs.discard(req_id) - self.random_reqs.discard(req_id) - self.top_p_reqs.discard(req_id) - self.top_k_reqs.discard(req_id) - self.generators.pop(req_index, None) - self.num_logprobs.pop(req_id, None) - self.prompt_logprob_reqs.discard(req_id) - return req_index - - def clear(self) -> None: - self.req_ids = [None] * self.max_num_reqs - self.req_id_to_index.clear() - self.greedy_reqs.clear() - self.random_reqs.clear() - self.top_p_reqs.clear() - self.top_k_reqs.clear() - self.generators.clear() - self.num_logprobs.clear() - self.prompt_logprob_reqs.clear() - - def condense(self, empty_req_indices: List[int]) -> None: - if self.num_reqs == 0: - # The batched states are empty. - return - - # NOTE(woosuk): This function assumes that the empty_req_indices - # is sorted in descending order. - last_req_index = self.num_reqs + len(empty_req_indices) - 1 - while empty_req_indices: - # Find the largest non-empty index. - while last_req_index in empty_req_indices: - last_req_index -= 1 - - # Find the smallest empty index. - empty_index = empty_req_indices.pop() - if empty_index >= last_req_index: - break - - # Swap the states. - req_id = self.req_ids[last_req_index] - self.req_ids[empty_index] = req_id - self.req_ids[last_req_index] = None - self.req_id_to_index[req_id] = empty_index - - # TODO(woosuk): Optimize the copy of token_ids_cpu and - # block_table_cpu. - self.token_ids_cpu[empty_index] = self.token_ids_cpu[ - last_req_index] - self.num_computed_tokens_cpu[ - empty_index] = self.num_computed_tokens_cpu[last_req_index] - self.block_table_cpu[empty_index] = self.block_table_cpu[ - last_req_index] - self.temperature_cpu[empty_index] = self.temperature_cpu[ - last_req_index] - self.top_p_cpu[empty_index] = self.top_p_cpu[last_req_index] - self.top_k_cpu[empty_index] = self.top_k_cpu[last_req_index] - generator = self.generators.pop(last_req_index, None) - if generator is not None: - self.generators[empty_index] = generator - - # Decrement last_req_index since it is now empty. 
- last_req_index -= 1 - - def make_sampling_metadata( - self, - skip_copy: bool = False, - ) -> SamplingMetadata: - if not skip_copy: - self.temperature[:self.num_reqs].copy_( - self.temperature_cpu_tensor[:self.num_reqs], non_blocking=True) - self.top_p[:self.num_reqs].copy_( - self.top_p_cpu_tensor[:self.num_reqs], non_blocking=True) - self.top_k[:self.num_reqs].copy_( - self.top_k_cpu_tensor[:self.num_reqs], non_blocking=True) - return SamplingMetadata( - temperature=self.temperature[:self.num_reqs], - all_greedy=self.all_greedy, - all_random=self.all_random, - top_p=self.top_p[:self.num_reqs], - top_k=self.top_k[:self.num_reqs], - no_top_p=self.no_top_p, - no_top_k=self.no_top_k, - generators=self.generators, - max_num_logprobs=self.max_num_logprobs, - ) - - @property - def num_reqs(self) -> int: - return len(self.req_id_to_index) - - @property - def all_greedy(self) -> bool: - return len(self.random_reqs) == 0 - - @property - def all_random(self) -> bool: - return len(self.greedy_reqs) == 0 - - @property - def no_top_p(self) -> bool: - return len(self.top_p_reqs) == 0 - - @property - def no_top_k(self) -> bool: - return len(self.top_k_reqs) == 0 - - @property - def max_num_logprobs(self) -> int: - return max(self.num_logprobs.values()) if self.num_logprobs else 0 - - @property - def no_logprob(self) -> bool: - return len(self.num_logprobs) == 0 - - @property - def no_prompt_logprob(self) -> bool: - return len(self.prompt_logprob_reqs) == 0 diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 0a5adfb28c9bd..e8d964a722f60 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -1,6 +1,7 @@ import gc import time -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple import numpy as np import torch @@ -14,16 +15,16 @@ from vllm.logger import init_logger from vllm.model_executor.model_loader import get_model from vllm.multimodal import MultiModalKwargs -from vllm.sampling_params import SamplingType +from vllm.sampling_params import SamplingParams, SamplingType from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, cdiv, is_pin_memory_available) from vllm.v1.attention.backends.flash_attn import (FlashAttentionBackend, FlashAttentionMetadata) from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.sample.metadata import SamplingMetadata -from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch if TYPE_CHECKING: + from vllm.multimodal.inputs import PlaceholderRange from vllm.v1.core.scheduler import SchedulerOutput logger = init_logger(__name__) @@ -34,7 +35,6 @@ class GPUModelRunner: def __init__( self, vllm_config: VllmConfig, - device: torch.device, input_registry: InputRegistry = INPUT_REGISTRY, ): self.vllm_config = vllm_config @@ -44,6 +44,7 @@ def __init__( self.load_config = vllm_config.load_config self.parallel_config = vllm_config.parallel_config self.scheduler_config = vllm_config.scheduler_config + self.device_config = vllm_config.device_config self.speculative_config = vllm_config.speculative_config self.prompt_adapter_config = vllm_config.prompt_adapter_config self.observability_config = vllm_config.observability_config @@ -52,7 +53,7 @@ def __init__( cache_config = self.cache_config scheduler_config = self.scheduler_config parallel_config = self.parallel_config - self.device = device + self.device = self.device_config.device self.pin_memory = 
is_pin_memory_available() self.dtype = self.model_config.dtype if cache_config.cache_dtype == "auto": @@ -477,7 +478,9 @@ def execute_model( sampling_metadata=sampling_metadata, ) - sampled_token_ids = sampler_output.sampled_token_ids + # NOTE: CPU-GPU synchronization happens here. + sampled_token_ids = sampler_output.sampled_token_ids.cpu() + sampled_token_ids_list = sampled_token_ids.tolist() # TODO(woosuk): The following loop can be slow since it iterates over # the requests one by one. Optimize. num_reqs = self.input_batch.num_reqs @@ -488,7 +491,7 @@ def execute_model( assert seq_len <= req_state.num_tokens if seq_len == req_state.num_tokens: # Append the sampled token to the output token ids. - token_id = sampled_token_ids[i] + token_id = sampled_token_ids_list[i] self.input_batch.token_ids_cpu[i, seq_len] = token_id req_state.output_token_ids.append(token_id) else: @@ -510,7 +513,7 @@ def execute_model( model_runner_output = ModelRunnerOutput( req_ids=self.input_batch.req_ids[:num_reqs], req_id_to_index=self.input_batch.req_id_to_index, - sampled_token_ids=sampled_token_ids, + sampled_token_ids_cpu=sampled_token_ids, logprob_token_ids_cpu=logprob_token_ids, logprobs_cpu=logprobs, ) @@ -580,9 +583,6 @@ def capture_model(self) -> None: # can reuse the memory pool allocated for the large shapes. with graph_capture(): for num_tokens in reversed(self.cudagraph_batch_sizes): - for _ in range(self.vllm_config.compilation_config. - cudagraph_num_of_warmups): - self._dummy_run(self.model, num_tokens, self.kv_caches) self._dummy_run(self.model, num_tokens, self.kv_caches) end_time = time.perf_counter() @@ -609,3 +609,269 @@ def _get_padded_batch_size(self, batch_size: int) -> Optional[int]: if batch_size <= size: return size return None + + +@dataclass +class CachedRequestState: + + req_id: str + prompt_token_ids: List[int] + prompt: Optional[str] + mm_inputs: List[MultiModalKwargs] + mm_positions: List["PlaceholderRange"] + sampling_params: SamplingParams + generator: Optional[torch.Generator] + + block_ids: List[int] + num_computed_tokens: int + output_token_ids: List[int] + + @property + def num_tokens(self) -> int: + return len(self.prompt_token_ids) + len(self.output_token_ids) + + +class InputBatch: + + def __init__( + self, + max_num_reqs: int, + max_model_len: int, + max_num_blocks_per_req: int, + device: torch.device, + pin_memory: bool, + ): + self.max_num_reqs = max_num_reqs + self.max_model_len = max_model_len + self.max_num_blocks_per_req = max_num_blocks_per_req + self.device = device + self.pin_memory = pin_memory + + self.req_ids: List[Optional[str]] = [None] * max_num_reqs + self.req_id_to_index: Dict[str, int] = {} + + self.token_ids_cpu = np.empty((max_num_reqs, max_model_len), + dtype=np.int32) + self.num_computed_tokens_cpu = np.empty(max_num_reqs, dtype=np.int32) + + # Attention-related. + self.block_table = torch.zeros((max_num_reqs, max_num_blocks_per_req), + device=self.device, + dtype=torch.int32) + self.block_table_cpu_tensor = torch.zeros( + (max_num_reqs, max_num_blocks_per_req), + device="cpu", + dtype=torch.int32, + pin_memory=pin_memory, + ) + self.block_table_cpu = self.block_table_cpu_tensor.numpy() + + # Sampling-related. 
+ self.temperature = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device=device) + self.temperature_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device="cpu", + pin_memory=pin_memory) + self.temperature_cpu = self.temperature_cpu_tensor.numpy() + self.greedy_reqs: Set[str] = set() + self.random_reqs: Set[str] = set() + + self.top_p = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device=device) + self.top_p_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device="cpu", + pin_memory=pin_memory) + self.top_p_cpu = self.top_p_cpu_tensor.numpy() + self.top_p_reqs: Set[str] = set() + + self.top_k = torch.empty((max_num_reqs, ), + dtype=torch.int32, + device=device) + self.top_k_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.int32, + device="cpu", + pin_memory=pin_memory) + self.top_k_cpu = self.top_k_cpu_tensor.numpy() + self.top_k_reqs: Set[str] = set() + + # req_index -> generator + self.generators: Dict[int, torch.Generator] = {} + + self.num_logprobs: Dict[str, int] = {} + self.prompt_logprob_reqs: Set[str] = set() + + def add_request( + self, + request: "CachedRequestState", + req_index: Optional[int] = None, + ) -> None: + if req_index is None: + req_index = self.num_reqs + assert req_index < self.max_num_reqs + + req_id = request.req_id + self.req_ids[req_index] = req_id + self.req_id_to_index[req_id] = req_index + + # Copy the prompt token ids and output token ids. + num_prompt_tokens = len(request.prompt_token_ids) + self.token_ids_cpu[ + req_index, :num_prompt_tokens] = request.prompt_token_ids + start_idx = num_prompt_tokens + end_idx = start_idx + len(request.output_token_ids) + self.token_ids_cpu[req_index, + start_idx:end_idx] = request.output_token_ids + + self.num_computed_tokens_cpu[req_index] = request.num_computed_tokens + num_blocks = len(request.block_ids) + self.block_table_cpu[req_index, :num_blocks] = request.block_ids + + sampling_params = request.sampling_params + self.temperature_cpu[req_index] = sampling_params.temperature + if sampling_params.sampling_type == SamplingType.GREEDY: + self.greedy_reqs.add(req_id) + else: + self.random_reqs.add(req_id) + + self.top_p_cpu[req_index] = sampling_params.top_p + if sampling_params.top_p < 1: + self.top_p_reqs.add(req_id) + self.top_k_cpu[req_index] = sampling_params.top_k + if sampling_params.top_k > 0: + self.top_k_reqs.add(req_id) + + self.generators[req_index] = request.generator + + num_logprobs = sampling_params.logprobs + if num_logprobs is not None and num_logprobs > 0: + self.num_logprobs[req_id] = num_logprobs + if sampling_params.prompt_logprobs: + self.prompt_logprob_reqs.add(req_id) + + def remove_request(self, req_id: str) -> Optional[int]: + req_index = self.req_id_to_index.pop(req_id, None) + if req_index is None: + return None + self.req_ids[req_index] = None + + self.greedy_reqs.discard(req_id) + self.random_reqs.discard(req_id) + self.top_p_reqs.discard(req_id) + self.top_k_reqs.discard(req_id) + self.generators.pop(req_index, None) + self.num_logprobs.pop(req_id, None) + self.prompt_logprob_reqs.discard(req_id) + return req_index + + def clear(self) -> None: + self.req_ids = [None] * self.max_num_reqs + self.req_id_to_index.clear() + self.greedy_reqs.clear() + self.random_reqs.clear() + self.top_p_reqs.clear() + self.top_k_reqs.clear() + self.generators.clear() + self.num_logprobs.clear() + self.prompt_logprob_reqs.clear() + + def condense(self, empty_req_indices: List[int]) -> None: + if self.num_reqs == 0: + # The batched states are 
empty. + return + + # NOTE(woosuk): This function assumes that the empty_req_indices + # is sorted in descending order. + last_req_index = self.num_reqs + len(empty_req_indices) - 1 + while empty_req_indices: + # Find the largest non-empty index. + while last_req_index in empty_req_indices: + last_req_index -= 1 + + # Find the smallest empty index. + empty_index = empty_req_indices.pop() + if empty_index >= last_req_index: + break + + # Swap the states. + req_id = self.req_ids[last_req_index] + self.req_ids[empty_index] = req_id + self.req_ids[last_req_index] = None + self.req_id_to_index[req_id] = empty_index + + # TODO(woosuk): Optimize the copy of token_ids_cpu and + # block_table_cpu. + self.token_ids_cpu[empty_index] = self.token_ids_cpu[ + last_req_index] + self.num_computed_tokens_cpu[ + empty_index] = self.num_computed_tokens_cpu[last_req_index] + self.block_table_cpu[empty_index] = self.block_table_cpu[ + last_req_index] + self.temperature_cpu[empty_index] = self.temperature_cpu[ + last_req_index] + self.top_p_cpu[empty_index] = self.top_p_cpu[last_req_index] + self.top_k_cpu[empty_index] = self.top_k_cpu[last_req_index] + generator = self.generators.pop(last_req_index, None) + if generator is not None: + self.generators[empty_index] = generator + + # Decrement last_req_index since it is now empty. + last_req_index -= 1 + + def make_sampling_metadata( + self, + skip_copy: bool = False, + ) -> SamplingMetadata: + if not skip_copy: + self.temperature[:self.num_reqs].copy_( + self.temperature_cpu_tensor[:self.num_reqs], non_blocking=True) + self.top_p[:self.num_reqs].copy_( + self.top_p_cpu_tensor[:self.num_reqs], non_blocking=True) + self.top_k[:self.num_reqs].copy_( + self.top_k_cpu_tensor[:self.num_reqs], non_blocking=True) + return SamplingMetadata( + temperature=self.temperature[:self.num_reqs], + all_greedy=self.all_greedy, + all_random=self.all_random, + top_p=self.top_p[:self.num_reqs], + top_k=self.top_k[:self.num_reqs], + no_top_p=self.no_top_p, + no_top_k=self.no_top_k, + generators=self.generators, + max_num_logprobs=self.max_num_logprobs, + ) + + @property + def num_reqs(self) -> int: + return len(self.req_id_to_index) + + @property + def all_greedy(self) -> bool: + return len(self.random_reqs) == 0 + + @property + def all_random(self) -> bool: + return len(self.greedy_reqs) == 0 + + @property + def no_top_p(self) -> bool: + return len(self.top_p_reqs) == 0 + + @property + def no_top_k(self) -> bool: + return len(self.top_k_reqs) == 0 + + @property + def max_num_logprobs(self) -> int: + return max(self.num_logprobs.values()) if self.num_logprobs else 0 + + @property + def no_logprob(self) -> bool: + return len(self.num_logprobs) == 0 + + @property + def no_prompt_logprob(self) -> bool: + return len(self.prompt_logprob_reqs) == 0 diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index d32848c3775ae..d33b55a8a9f9a 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -15,7 +15,6 @@ from vllm.model_executor import set_random_seed from vllm.platforms import current_platform from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size -from vllm.v1.core.scheduler import SchedulerOutput from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.worker.gpu_model_runner import GPUModelRunner @@ -57,6 +56,7 @@ def __init__( from vllm.utils import init_cached_hf_modules init_cached_hf_modules() + self.model_runner = GPUModelRunner(vllm_config) # Torch profiler. 
Enabled and configured through env vars: # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace if envs.VLLM_TORCH_PROFILER_DIR: @@ -103,9 +103,6 @@ def initialize(self): # Set random seed. set_random_seed(self.model_config.seed) - # Construct the model runner - self.model_runner = GPUModelRunner(self.vllm_config, self.device) - def load_model(self) -> None: self.model_runner.load_model() @@ -201,7 +198,7 @@ def execute_model( scheduler_output: "SchedulerOutput", ) -> ModelRunnerOutput: output = self.model_runner.execute_model(scheduler_output) - return output if self.rank == 0 else None + # TODO(woosuk): Send the output to the engine process. return output def profile(self, is_start=True): @@ -212,10 +209,6 @@ def profile(self, is_start=True): else: self.profiler.stop() - def check_health(self) -> None: - # worker will always be healthy as long as it's running. - return - def init_worker_distributed_environment( parallel_config: ParallelConfig, diff --git a/vllm/worker/hpu_model_runner.py b/vllm/worker/hpu_model_runner.py index def57fd0965ef..48c4af5f915fa 100755 --- a/vllm/worker/hpu_model_runner.py +++ b/vllm/worker/hpu_model_runner.py @@ -687,10 +687,7 @@ def load_model(self) -> None: assert hasattr( self.model, "embedding_padding_modules" ), "Model does not have embedding_padding_modules" - assert not self.lora_config.bias_enabled, \ - "Bias support in LoRA is not enabled in HPU yet." - assert not self.lora_config.fully_sharded_loras, \ - "Fully sharded LoRAs is not enabled in HPU yet." + if supports_multimodal(self.model): logger.warning( "Regarding multimodal models, vLLM currently " diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 1bc5f65c7127f..4388b3c1ee164 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1782,9 +1782,6 @@ def need_recv_kv(self, model_input, kv_caches) -> bool: kv_caches: vLLM's paged memory """ - if self.vllm_config.kv_transfer_config is None: - return False - prefill_meta = model_input.attn_metadata.prefill_metadata # check if the current run is profiling @@ -1792,6 +1789,9 @@ def need_recv_kv(self, model_input, kv_caches) -> bool: # check if the current run is prefill is_prefill_run = prefill_meta is not None + if self.vllm_config.kv_transfer_config is None: + return False + return self.vllm_config.kv_transfer_config.is_kv_consumer and ( not is_profile_run) and is_prefill_run @@ -1807,9 +1807,6 @@ def need_send_kv(self, model_input, kv_caches) -> bool: kv_caches: vLLM's paged memory """ - if self.vllm_config.kv_transfer_config is None: - return False - prefill_meta = model_input.attn_metadata.prefill_metadata # check if the current run is profiling @@ -1817,6 +1814,9 @@ def need_send_kv(self, model_input, kv_caches) -> bool: # check if the current run is prefill is_prefill_run = prefill_meta is not None + if self.vllm_config.kv_transfer_config is None: + return False + return self.vllm_config.kv_transfer_config.is_kv_producer and ( not is_profile_run) and is_prefill_run diff --git a/vllm/worker/worker_base.py b/vllm/worker/worker_base.py index 6d00102e0a324..7c0bc5a678956 100644 --- a/vllm/worker/worker_base.py +++ b/vllm/worker/worker_base.py @@ -439,7 +439,7 @@ def init_worker(self, *args, **kwargs): Here we inject some common logic before initializing the worker. Arguments are passed to the worker class constructor. """ - enable_trace_function_call_for_thread(self.vllm_config) + enable_trace_function_call_for_thread() # see https://github.com/NVIDIA/nccl/issues/1234 os.environ['NCCL_CUMEM_ENABLE'] = '0'
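
A minimal sketch of the sampled-token handoff changed above in vllm/v1/sample/sampler.py and vllm/v1/worker/gpu_model_runner.py: the sampler now keeps sampled_token_ids as a GPU tensor, and the model runner performs the device-to-host copy (the CPU-GPU synchronization point) before appending tokens per request. The helper name and arguments below are illustrative assumptions, not code from the patch.

# Sketch only: assumed helper mirroring the pattern in execute_model().
from typing import List

import torch


def append_sampled_tokens(sampled_token_ids: torch.Tensor,
                          output_token_ids: List[List[int]]) -> List[int]:
    # The .cpu()/.tolist() call forces the device-to-host copy and is the
    # single synchronization point before per-request bookkeeping.
    sampled_list = sampled_token_ids.cpu().tolist()
    for req_index, token_id in enumerate(sampled_list):
        output_token_ids[req_index].append(token_id)
    return sampled_list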
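
The InputBatch.condense() method re-added to gpu_model_runner.py compacts live requests into the lowest row indices by repeatedly moving the highest live row into the smallest empty slot. Below is a standalone, simplified sketch of the same idea, operating on a plain Python list instead of the batch's CPU arrays; the function and variable names are illustrative.

from typing import List, Optional


def condense(rows: List[Optional[str]], num_live: int,
             empty_indices: List[int]) -> None:
    """Compact live rows downward; empty_indices must be sorted descending."""
    last = num_live + len(empty_indices) - 1
    while empty_indices:
        # Skip rows at the top that are themselves empty.
        while last in empty_indices:
            last -= 1
        # pop() returns the smallest empty index because the list is descending.
        empty = empty_indices.pop()
        if empty >= last:
            break
        # Move the highest live row into the lowest hole.
        rows[empty] = rows[last]
        rows[last] = None
        last -= 1


# Example: the requests at rows 1 and 3 were removed.
rows = ["req-a", None, "req-c", None, "req-e"]
condense(rows, num_live=3, empty_indices=[3, 1])
assert rows == ["req-a", "req-e", "req-c", None, None]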