
Commit 64ab562

fix weight_loading_mode for deepseek
Signed-off-by: Anthony Chang <[email protected]>
1 parent: f03053b

1 file changed: +18 −4 lines changed

tensorrt_llm/_torch/models/modeling_deepseekv3.py

Lines changed: 18 additions & 4 deletions
@@ -45,6 +45,7 @@
 from tensorrt_llm.llmapi.utils import enable_llm_debug
 from tensorrt_llm.mapping import Mapping
 from tensorrt_llm.models.modeling_utils import QuantConfig
+from tensorrt_llm.quantization.mode import QuantAlgo
 from tensorrt_llm.quantization.utils.fp8_utils import (
     resmooth_to_fp8_e8m0, transform_sf_into_required_layout)

@@ -457,10 +458,13 @@ def __init__(self,
             layer_idx=layer_idx,
             # DS-R1 W4A8 is only supported through custom quantization script from
             # examples/quantization/quantize_mixed_precision_moe.py
-            weight_loading_mode=(MoEWeightLoadingMode.W4A8_CUSTOM
-                                 if model_config.quant_config.quant_mode.
-                                 is_int4_weight_only_per_group() else
-                                 MoEWeightLoadingMode.VANILLA))
+            weight_loading_mode=(
+                MoEWeightLoadingMode.W4A8_CUSTOM
+                if self._get_experts_quant_config(
+                    model_config,
+                    layer_idx).layer_quant_mode.is_int4_weight_only_per_group()
+                else MoEWeightLoadingMode.VANILLA),
+        )

         self.mapping = model_config.mapping

@@ -525,6 +529,13 @@ def _compute_shared_expert_tp_size(self, intermediate_size: int,

         return shared_tp_size, shared_output_scale

+    @staticmethod
+    def _get_experts_quant_config(model_config, layer_idx: int):
+        if model_config.quant_config_dict is None:
+            return None
+        return model_config.quant_config_dict.get(
+            f"model.layers.{layer_idx}.mlp.experts", model_config.quant_config)
+
     def compute_routed_output(self, hidden_states, hidden_states_fp4,
                               all_rank_num_tokens, all_rank_max_num_tokens,
                               do_finalize):
@@ -635,6 +646,9 @@ def __init__(self, model_config: ModelConfig[PretrainedConfig],
         quant_config = self._get_decoder_layer_quant_config(
             model_config, layer_idx)
         self.is_nvfp4 = quant_config.layer_quant_mode.has_nvfp4()
+        assert (
+            quant_config.quant_algo
+            is not QuantAlgo.MIXED_PRECISION), "MIXED_PRECISION is ambiguous"

         has_tp = mapping.has_tp()