From 02b62ec4c05c3e6c7db990cc1125d2983ee8e05f Mon Sep 17 00:00:00 2001
From: Max de Bayser
Date: Tue, 22 Oct 2024 20:45:02 -0300
Subject: [PATCH] appease linter

Signed-off-by: Max de Bayser
---
 vllm/config.py                             | 8 ++++----
 vllm/model_executor/model_loader/loader.py | 4 ++--
 vllm/model_executor/models/bert.py         | 5 ++---
 vllm/transformers_utils/config.py          | 2 +-
 4 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/vllm/config.py b/vllm/config.py
index 4a4c764053c65..42edc95d03916 100644
--- a/vllm/config.py
+++ b/vllm/config.py
@@ -5,19 +5,19 @@
                     Mapping, Optional, Set, Tuple, Type, Union)
 
 import torch
+from transformers import PretrainedConfig
 
 import vllm.envs as envs
-from transformers import PretrainedConfig
 from vllm.logger import init_logger
-from vllm.model_executor.layers.pooler import PoolingConfig  # noqa: F401
+from vllm.model_executor.layers.pooler import PoolingConfig
 from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
 from vllm.model_executor.models import ModelRegistry
 from vllm.platforms import current_platform
 from vllm.tracing import is_otel_available, otel_import_error_traceback
-from vllm.transformers_utils.config import get_pooling_config  # noqa: F401
 from vllm.transformers_utils.config import (ConfigFormat, get_config,
                                             get_hf_image_processor_config,
-                                            get_hf_text_config)
+                                            get_hf_text_config,
+                                            get_pooling_config)
 from vllm.utils import (GiB_bytes, cuda_device_count_stateless,
                         get_cpu_memory, is_hip, is_openvino, is_xpu,
                         print_warning_once)
diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py
index 2a648e0a20b2d..133654e53c5d4 100644
--- a/vllm/model_executor/model_loader/loader.py
+++ b/vllm/model_executor/model_loader/loader.py
@@ -18,9 +18,9 @@
 import torch
 from huggingface_hub import HfApi, hf_hub_download
 from torch import nn
+from transformers import AutoModelForCausalLM, PretrainedConfig
 from transformers.utils import SAFE_WEIGHTS_INDEX_NAME
 
-from transformers import AutoModelForCausalLM, PretrainedConfig
 from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, LoadFormat,
                          LoRAConfig, ModelConfig, MultiModalConfig,
                          ParallelConfig, SchedulerConfig)
@@ -28,7 +28,7 @@
                               get_tensor_model_parallel_world_size)
 from vllm.envs import VLLM_USE_MODELSCOPE
 from vllm.logger import init_logger
-from vllm.model_executor.layers.pooler import PoolingConfig  # noqa: F401
+from vllm.model_executor.layers.pooler import PoolingConfig
 from vllm.model_executor.layers.quantization.base_config import (
     QuantizationConfig)
 from vllm.model_executor.model_loader.tensorizer import (
diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py
index a0181bfd2a64f..54b85b05287fb 100644
--- a/vllm/model_executor/models/bert.py
+++ b/vllm/model_executor/models/bert.py
@@ -2,8 +2,8 @@
 
 import torch
 from torch import nn
-
 from transformers import BertConfig
+
 from vllm.attention import Attention, AttentionMetadata, AttentionType
 from vllm.attention.backends.xformers import XFormersImpl
 from vllm.config import CacheConfig
@@ -12,8 +12,7 @@
 from vllm.model_executor.layers.linear import (ColumnParallelLinear,
                                                QKVParallelLinear,
                                                RowParallelLinear)
-from vllm.model_executor.layers.pooler import (Pooler,  # noqa: F401
-                                               PoolingConfig)
+from vllm.model_executor.layers.pooler import Pooler, PoolingConfig
 from vllm.model_executor.layers.quantization.base_config import (
     QuantizationConfig)
 from vllm.model_executor.layers.vocab_parallel_embedding import (
diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py
index fbf8e96817e61..7a2f82c72b290 100644
--- a/vllm/transformers_utils/config.py
+++ b/vllm/transformers_utils/config.py
@@ -6,13 +6,13 @@
 import huggingface_hub
 from huggingface_hub import (file_exists, hf_hub_download,
                              try_to_load_from_cache)
+from transformers import GenerationConfig, PretrainedConfig
 from transformers.models.auto.image_processing_auto import (
     get_image_processor_config)
 from transformers.models.auto.modeling_auto import (
     MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
 from transformers.utils import CONFIG_NAME as HF_CONFIG_NAME
 
-from transformers import GenerationConfig, PretrainedConfig
 from vllm.envs import VLLM_USE_MODELSCOPE
 from vllm.logger import init_logger
 # yapf conflicts with isort for this block