Commit
Fix bug causing random initialization of bias when using GPTQ quantization with models without bias (#1827)

* Fix gptq quantization for models without bias
B-201 authored Apr 29, 2024
1 parent c55f882 commit e3fd277
Showing 1 changed file with 3 additions and 2 deletions.
optimum/gptq/quantizer.py (5 changes: 3 additions & 2 deletions)
@@ -278,19 +278,20 @@ def _replace_by_quant_layers(self, module: nn.Module, names: List[str], name: st
                 elif isinstance(layer, Conv1D):
                     in_features = layer.weight.shape[0]
                     out_features = layer.weight.shape[1]
+                bias = layer.bias is not None
                 if not (self.desc_act) or self.group_size == -1:
                     new_layer = QuantLinear(
                         self.bits,
                         self.group_size,
                         in_features,
                         out_features,
-                        True,
+                        bias,
                         use_cuda_fp16=self.use_cuda_fp16,
                         weight_dtype=layer.weight.dtype,
                     )
                 else:
                     new_layer = QuantLinear(
-                        self.bits, self.group_size, in_features, out_features, True, weight_dtype=layer.weight.dtype
+                        self.bits, self.group_size, in_features, out_features, bias, weight_dtype=layer.weight.dtype
                     )
                 new_layer.device = device
                 setattr(module, attr, new_layer.to(device))
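For context, the bias argument of QuantLinear controls whether the quantized replacement layer allocates a bias parameter. Hardcoding it to True meant that models whose layers have no bias received a freshly, randomly initialized bias tensor after quantization. Below is a minimal sketch of the check the fix relies on, using plain torch.nn.Linear layers as stand-ins for the model's modules; it illustrates the layer.bias is not None test only, not the quantizer itself.

import torch.nn as nn

# Stand-in layers for a model with and without bias.
with_bias = nn.Linear(4, 8, bias=True)
without_bias = nn.Linear(4, 8, bias=False)

# The fix derives the QuantLinear bias flag from the source layer instead of
# hardcoding True, so a bias-less layer no longer receives a randomly
# initialized bias after quantization.
print(with_bias.bias is not None)     # True
print(without_bias.bias is not None)  # False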
