Fix (core/float): add default for float_scaling_impl
Giuseppe5 committed Jun 18, 2024
1 parent 6bdb1f8 · commit 181ef80
Showing 1 changed file with 3 additions and 0 deletions.
3 changes: 3 additions & 0 deletions src/brevitas/core/quant/float.py
@@ -52,6 +52,9 @@ def __init__(
         if scaling_impl is None:
             scaling_impl = ConstScaling(1., device=device, dtype=dtype)
 
+        if float_scaling_impl is None:
+            float_scaling_impl = ConstScaling(1., device=device, dtype=dtype)
+
         # Zero-point is currently hardcoded to 0
         self.zero_point_impl = StatelessBuffer(torch.tensor(0., device=device, dtype=dtype))
         self.float_scaling_impl = float_scaling_impl
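
The change is small: as already done for scaling_impl, an omitted float_scaling_impl now falls back to a constant scale of 1.0 instead of being left as None. The sketch below illustrates the defaulting pattern with hypothetical stand-in classes (ConstScaleSketch, FloatQuantSketch); it is not the actual Brevitas code, whose real counterparts are ConstScaling and the quantizer defined in src/brevitas/core/quant/float.py.

    from typing import Optional

    import torch
    import torch.nn as nn


    class ConstScaleSketch(nn.Module):
        """Hypothetical stand-in for brevitas' ConstScaling: always returns a fixed scale."""

        def __init__(self, value: float):
            super().__init__()
            self.register_buffer('value', torch.tensor(value))

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.value


    class FloatQuantSketch(nn.Module):
        """Hypothetical stand-in for the quantizer patched by this commit."""

        def __init__(
                self,
                scaling_impl: Optional[nn.Module] = None,
                float_scaling_impl: Optional[nn.Module] = None):
            super().__init__()
            # Mirrors the commit: both scaling modules now default to a constant
            # scale of 1.0 when the caller does not supply them.
            if scaling_impl is None:
                scaling_impl = ConstScaleSketch(1.)
            if float_scaling_impl is None:
                float_scaling_impl = ConstScaleSketch(1.)
            self.scaling_impl = scaling_impl
            self.float_scaling_impl = float_scaling_impl


    # With the default in place, the quantizer can be constructed without an
    # explicit float_scaling_impl and still resolves to a well-defined scale of 1.0.
    quant = FloatQuantSketch()
    print(quant.float_scaling_impl(torch.empty(1)))  # tensor(1.)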
