Fix (gpxq): removing null ops
i-colbert committed Apr 26, 2024
1 parent 2e519f5 commit a24afdc
Showing 2 changed files with 0 additions and 4 deletions.
2 changes: 0 additions & 2 deletions src/brevitas/graph/gpfq.py
@@ -17,7 +17,6 @@
 from brevitas.graph.gpxq import StopFwdException
 from brevitas.graph.gpxq import SUPPORTED_CONV_OP
 import brevitas.nn as qnn
-from brevitas.quant_tensor import _unpack_quant_tensor


 class gpfq_mode(gpxq_mode):
@@ -163,7 +162,6 @@ def update_batch(self, module, input, current_layer):
         is_quant_enabled = module.weight_quant.is_quant_enabled

         inp = self.process_input(input)
-        inp = _unpack_quant_tensor(inp)
         batch_size = inp.shape[0]

         # Preprocess the input to compute the Hessian
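The change is the same in both files: by the time update_batch runs, process_input already hands back a plain tensor, so the extra unpack call does nothing and can be dropped. Below is a minimal sketch of that reasoning; unpack_if_quant is a hypothetical stand-in for _unpack_quant_tensor, assumed to pass non-QuantTensor inputs through unchanged.

import torch

def unpack_if_quant(x):
    # Stand-in for brevitas.quant_tensor._unpack_quant_tensor: return the
    # wrapped value of a QuantTensor, pass any other input through untouched.
    return x.value if hasattr(x, "value") else x

inp = torch.randn(8, 16)             # what process_input is assumed to return
assert unpack_if_quant(inp) is inp   # unpacking a plain tensor is a no-op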
2 changes: 0 additions & 2 deletions src/brevitas/graph/gptq.py
@@ -21,7 +21,6 @@
 from brevitas.graph.gpxq import StopFwdException
 from brevitas.graph.gpxq import SUPPORTED_CONV_OP
 import brevitas.nn as qnn
-from brevitas.quant_tensor import _unpack_quant_tensor


 class gptq_mode(gpxq_mode):
@@ -145,7 +144,6 @@ def update_batch(self, module, input, current_layer):
         # Update reference to current layer
         current_layer.layer_names.add(self.name)
         inp = self.process_input(input)
-        inp = _unpack_quant_tensor(inp)
         batch_size = inp.shape[0]

         # Preprocess the input to compute the Hessian
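For context, update_batch fires on every forward pass while one of these modes is active, so the removed line sat on the calibration hot path. Below is a sketch of how gptq_mode is typically driven, following the loop pattern from the Brevitas PTQ examples; the attribute names num_layers, update() and model are recalled from those examples and should be checked against the installed version.

import torch
from brevitas.graph.gptq import gptq_mode

def apply_gptq(model, calib_loader, device="cpu"):
    # model is an already-quantized Brevitas model and calib_loader a DataLoader;
    # both are placeholders for this sketch.
    model.eval()
    with torch.no_grad(), gptq_mode(model) as gptq:
        gptq_model = gptq.model
        for _ in range(gptq.num_layers):          # one outer pass per layer
            for images, _ in calib_loader:
                gptq_model(images.to(device))     # each forward calls update_batch
            gptq.update()                         # solve and quantize the current layer
    return model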
