From 3e21d57a61f4f67c8f849a6cb072f4464c19da74 Mon Sep 17 00:00:00 2001
From: "Wang, Chang"
Date: Fri, 29 Nov 2024 17:27:52 +0300
Subject: [PATCH] Update optimum/intel/neural_compressor/quantization.py

Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com>
---
 optimum/intel/neural_compressor/quantization.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py
index 8140dccb26..e43bda0001 100644
--- a/optimum/intel/neural_compressor/quantization.py
+++ b/optimum/intel/neural_compressor/quantization.py
@@ -376,7 +376,7 @@ def _weight_only_quantization(
     low_cpu_mem_usage = True
 
     if use_xpu:
-        if hasattr(quantization_config, "use_layer_wise") and quantization_config.use_layer_wise:
+        if getattr(quantization_config, "use_layer_wise", False):
             from neural_compressor.torch import load_empty_model
 
             model = load_empty_model(model_id, cls=model_class, trust_remote_code=trust_remote_code)
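
Note: the change replaces the two-step check, hasattr(quantization_config, "use_layer_wise") followed by an attribute access, with a single getattr(quantization_config, "use_layer_wise", False), which reads the flag and falls back to False when the attribute is absent. A minimal sketch of that equivalence, illustrative only (the SimpleNamespace objects below are hypothetical stand-ins for the real quantization config):

    from types import SimpleNamespace

    # Hypothetical configs: one carrying the flag, one missing the attribute entirely.
    cfg_with_flag = SimpleNamespace(use_layer_wise=True)
    cfg_without_flag = SimpleNamespace()

    for cfg in (cfg_with_flag, cfg_without_flag):
        old_style = hasattr(cfg, "use_layer_wise") and cfg.use_layer_wise
        new_style = getattr(cfg, "use_layer_wise", False)
        assert bool(old_style) == bool(new_style)  # both branches agree on the truth value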