Fix ShardedStateLoader for vllm fp8 quantization (vllm-project#7708)
sfc-gh-zhwang authored and omrishiv committed Aug 26, 2024
1 parent 0d286a0 commit a44ec30
Showing 1 changed file with 4 additions and 0 deletions: vllm/model_executor/model_loader/loader.py
@@ -579,6 +579,10 @@ def load_model(self, *, model_config: ModelConfig,
             with torch.device(device_config.device):
                 model = _initialize_model(model_config, self.load_config,
                                           lora_config, cache_config)
+                for _, module in model.named_modules():
+                    quant_method = getattr(module, "quant_method", None)
+                    if quant_method is not None:
+                        quant_method.process_weights_after_loading(module)
             rank = get_tensor_model_parallel_rank()
             pattern = os.path.join(
                 local_model_path,
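
The added loop invokes each module's quantization post-processing hook immediately after model initialization, presumably so the fp8 weight tensors take on their final post-processing shapes and dtypes before the saved sharded state is read back in. Below is a minimal usage sketch of the affected path, assuming a checkpoint previously saved in sharded-state format with fp8 quantization; the model path is an illustrative placeholder, not from the commit:

from vllm import LLM

# Hypothetical example: load a sharded-state checkpoint saved with fp8
# quantization. Before this fix, ShardedStateLoader never called
# quant_method.process_weights_after_loading, so fp8 models loaded via
# load_format="sharded_state" failed to load correctly.
llm = LLM(
    model="/path/to/sharded_fp8_checkpoint",  # placeholder path
    load_format="sharded_state",
    quantization="fp8",
)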
