From fe96b7c9d3a04d8de51ae6eaff506eea1f3dfd8b Mon Sep 17 00:00:00 2001
From: bk-201
Date: Wed, 24 Apr 2024 15:30:16 +0800
Subject: [PATCH] Fix gptq quantization for models without bias

---
 optimum/gptq/quantizer.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/optimum/gptq/quantizer.py b/optimum/gptq/quantizer.py
index 289e325682..97de738c4f 100644
--- a/optimum/gptq/quantizer.py
+++ b/optimum/gptq/quantizer.py
@@ -278,19 +278,20 @@ def _replace_by_quant_layers(self, module: nn.Module, names: List[str], name: st
                 elif isinstance(layer, Conv1D):
                     in_features = layer.weight.shape[0]
                     out_features = layer.weight.shape[1]
+                bias = layer.bias is not None
                 if not (self.desc_act) or self.group_size == -1:
                     new_layer = QuantLinear(
                         self.bits,
                         self.group_size,
                         in_features,
                         out_features,
-                        True,
+                        bias,
                         use_cuda_fp16=self.use_cuda_fp16,
                         weight_dtype=layer.weight.dtype,
                     )
                 else:
                     new_layer = QuantLinear(
-                        self.bits, self.group_size, in_features, out_features, True, weight_dtype=layer.weight.dtype
+                        self.bits, self.group_size, in_features, out_features, bias, weight_dtype=layer.weight.dtype
                     )
                 new_layer.device = device
                 setattr(module, attr, new_layer.to(device))
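
Note (illustrative only, not part of the patch): a minimal sketch of why the
added line uses the identity check `layer.bias is not None` rather than tensor
truthiness or the previously hard-coded `True`. Here `nn.Linear` stands in for
any of the layer types the hunk above handles:

    from torch import nn

    # A layer created without a bias stores None in .bias;
    # otherwise .bias is a multi-element Parameter.
    no_bias = nn.Linear(8, 4, bias=False)
    with_bias = nn.Linear(8, 4)

    # The identity check works in both cases.
    print(no_bias.bias is not None)    # False
    print(with_bias.bias is not None)  # True

    # Truthiness does not: bool() on a multi-element tensor raises
    # "Boolean value of Tensor with more than one element is ambiguous".
    # bool(with_bias.bias)  # RuntimeError

    # The pre-patch hard-coded True made QuantLinear allocate a bias
    # that bias-free models never had, breaking their quantization.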