From 83dbf25c33ae04b7b1f9ed58ee7bb47783caae6d Mon Sep 17 00:00:00 2001 From: Giuseppe Franco Date: Fri, 26 Apr 2024 13:43:10 +0100 Subject: [PATCH] Fix --- src/brevitas/graph/gpfq.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/brevitas/graph/gpfq.py b/src/brevitas/graph/gpfq.py index 3ad5bf024..17d6a9c89 100644 --- a/src/brevitas/graph/gpfq.py +++ b/src/brevitas/graph/gpfq.py @@ -314,8 +314,8 @@ def __init__( def single_layer_update(self): # raise error in case no quant-input is here - if self.quant_input is None: - raise ValueError('Expected self.quant_input to calculate L1-norm upper bound, but recevied None. ' + \ + if self.quant_metadata is None: + raise ValueError('Expected self.quant_metadata to calculate L1-norm upper bound, but received None. ' + \ 'Make sure that either the input to the model is a IntQuantTensor or the layer has an input quant enabled. ' \ 'Also, check if `use_quant_activations=True` in `gpfq_mode` when `accumulator_bit_width` is specified. ' + \ 'Alternatively, provide a custom `a2q_layer_filter_fnc` to `gpfq_mode` to filter layers without a quant_tensor input.'