Commit 3e3bda2
Add & fix resnet50
Aba committed Jul 31, 2024
1 parent 3e3307c commit 3e3bda2
Showing 4 changed files with 574 additions and 139 deletions.
4 changes: 4 additions & 0 deletions deepsocflow/py/xbundle.py
@@ -58,6 +58,9 @@ def call(self, input_tensor, x_add=None, training=False):
 
             x = self.add([x, x_add])
             x = self.add.act(x)
+        elif self.add is not None:
+            raise ValueError("A Bundle initialized with add_act(), should have the add tensor passed")
+
         if self.pool:
             x = self.pool(x)
             x = self.pool.act(x)
@@ -79,6 +82,7 @@ def call_int(self, x, hw):
         out = self.core.act.call_int(out, hw)
 
         if self.add:
+            print(f"Bundle {self.ib} source_ib: {self.add.source_ib}")
             out = self.add.call_int(out, hw)
             out = self.add.act.call_int(out, hw)
 
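Note on the elif guard above: a Bundle built with add_act() now fails loudly when its residual input is omitted, instead of silently skipping the addition. A minimal standalone sketch of the behavior (Bundle here is a stub, not the real deepsocflow class):

    # Stub mirroring the guard added above; the real Bundle wraps qkeras layers.
    class Bundle:
        def __init__(self, add=None):
            self.add = add  # set when the bundle is built with add_act()

        def call(self, x, x_add=None):
            if x_add is not None:
                x = self.add([x, x_add])  # residual addition path
            elif self.add is not None:
                raise ValueError("A Bundle initialized with add_act(), should have the add tensor passed")
            return x

    b = Bundle(add=lambda ts: ts[0] + ts[1])
    print(b.call(1.0, x_add=2.0))  # 3.0: skip tensor supplied
    try:
        b.call(1.0)                # skip tensor missing
    except ValueError as e:
        print("raises:", e)

This matters for the ResNet-50 model this commit adds, where every residual block must receive its skip tensor explicitly.
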
16 changes: 6 additions & 10 deletions deepsocflow/py/xlayers.py
@@ -1,6 +1,6 @@
 import tensorflow as tf
 from tensorflow import keras
-from keras.layers import Layer, Add
+from keras.layers import Layer, Add, MaxPooling2D
 from qkeras import *
 import numpy as np
 import math
@@ -17,10 +17,10 @@ def __init__(self, sys_bits, o_int_bits, type='relu', slope=1, *args, **kwargs):
         self.o_int_bits = o_int_bits
         self.type = type
 
-        self.slope = slope
-        self.non_zero = 1*(slope != 0)
-        self.log_slope = np.log2(slope) if self.non_zero else 0
-        assert int(self.log_slope) == self.log_slope and self.log_slope <= 0, f"Error: negative_slope:{slope} of leaky_relu has to be a negative power of two. eg.0.125"
+        self.slope = 1 if type == None else slope
+        self.non_zero = 1*(self.slope != 0)
+        self.log_slope = np.log2(self.slope) if self.non_zero else 0
+        assert int(self.log_slope) == self.log_slope and self.log_slope <= 0, f"Error: negative_slope:{self.slope} of leaky_relu has to be a negative power of two. eg.0.125"
         self.plog_slope = -int(self.log_slope)
         self.shift_bits = None
 
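Note on the slope change above: the power-of-two constraint exists so hardware can apply the leaky slope as a plain right shift, and the new default (1 if type == None else slope) keeps a type=None no-op activation from tripping the assert. A worked check of the arithmetic behind plog_slope, using only NumPy:

    import numpy as np

    slope = 0.125                        # 2**-3, a valid negative power of two
    log_slope = np.log2(slope)           # -3.0
    assert int(log_slope) == log_slope and log_slope <= 0
    plog_slope = -int(log_slope)         # 3

    x = -40                              # a negative pre-activation, as an integer
    print(x * slope, x >> plog_slope)    # -5.0 -5: the shift reproduces the multiply
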
@@ -53,9 +53,8 @@ def call_int(self, x_tensor, hw):
         x = np.clip(x, -2**(self.out.bits - self.plog_slope - 1), 2**(self.out.bits-1)-1).astype(int)
 
         out = XTensor(tensor=x, bits=self.out.bits, frac=self.out.frac, from_int=True)
-
         assert np.allclose(out.ftensor, self.out.ftensor), \
-            f"Activation output does not match. \nout:{out.ftensor.numpy().flatten()[:100]}, \nself.out:{self.out.ftensor.numpy().flatten()[:100]}"
+            f"Activation output does not match. {(out.ftensor.shape, self.out.ftensor.shape)} \nout:{out.ftensor.numpy().flatten()}, \nself.out:{self.out.ftensor.numpy().flatten()}, \nsub:{out.ftensor.numpy().flatten()-self.out.ftensor.numpy().flatten()}"
         self.out = out
         return out
 
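Note on the assert message above: it now reports both shapes, the full tensors, and their elementwise difference, which is usually enough to localize a fixed-point mismatch. The same pattern in plain NumPy (XTensor stripped away; the values here are made up):

    import numpy as np

    out = np.array([1.0, 2.0, 3.5])   # integer-simulated activation output
    ref = np.array([1.0, 2.0, 3.0])   # float reference output
    try:
        assert np.allclose(out, ref), (
            f"Activation output does not match. {(out.shape, ref.shape)} "
            f"\nout:{out.flatten()}, \nself.out:{ref.flatten()}, "
            f"\nsub:{out.flatten() - ref.flatten()}"
        )
    except AssertionError as e:
        print(e)                      # the sub row pinpoints the bad element
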
@@ -119,9 +118,6 @@ def call_int(self, x_tensor, hw):
         Add Bias
         '''
 
-        print(f"{self.use_bias}, {self.bias_quantizer_internal}")
-        print(f"{self.get_folded_weights()[1]}")
-
         out, (self.bias_val_shift, self.bias_b_shift) = out.add_val_shift(self.b)
         assert out.bits <= hw.INT_BITS, \
             f"After bias addition, resulting bits {out.bits} are more than bits for integer in CPU {hw.INT_BITS}. Reduce bits or increase integer bits of bias to continue"
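Note on the bias addition above: add_val_shift appears to align the binary points of the accumulator and the bias before adding, which can grow the result's bit width, and the assert guards that growth against the CPU's integer width. The underlying fixed-point arithmetic, sketched with plain integers (XTensor internals are assumed, not taken from this diff):

    # Fixed-point add: align fractional bits, then add; the width can grow.
    a_int, a_frac, a_bits = 300, 8, 16   # accumulator: 300 / 2**8 = 1.171875
    b_int, b_frac, b_bits = 7, 4, 8      # bias:        7 / 2**4  = 0.4375

    shift = a_frac - b_frac              # 4: bias is shifted left to align
    out_int = a_int + (b_int << shift)   # 300 + 112 = 412
    out_frac = max(a_frac, b_frac)       # 8
    out_bits = max(a_bits, b_bits + shift) + 1  # +1 for a possible carry -> 17
    print(out_int / 2**out_frac)         # 1.609375 == 1.171875 + 0.4375

    INT_BITS = 32                        # stand-in for hw.INT_BITS
    assert out_bits <= INT_BITS, "reduce bits or increase integer bits of bias"
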
(Diffs for the remaining two changed files did not load and are not shown.)
