Updated numpy-scipy side to support f32 #69
pthomadakis committed Nov 7, 2024
1 parent f4fe43a commit 836f8ce
Showing 4 changed files with 124 additions and 59 deletions.
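The two files shown below thread a value_type (e.g. "f32" or "f64") through the MLIR builders and the ctypes lowering instead of hard-coding f64. A rough sketch of the kind of caller this change targets — a cometpy-compiled kernel fed float32 NumPy/SciPy data — follows; the comet.compile decorator usage and import path are assumptions based on the cometpy frontend, not part of this diff.

# Hypothetical usage sketch (not part of this commit): with the f32 support
# below, float32 inputs should flow through the numpy-scipy frontend rather
# than being assumed to be float64.
import numpy as np
import scipy.sparse as sp
from cometpy import comet  # assumed import path for the cometpy frontend

@comet.compile(flags=None)  # decorator name/signature assumed, not from this diff
def spmm(A, B):
    return A @ B

A = sp.random(64, 64, density=0.1, format='csr', dtype=np.float32)
B = np.ones((64, 64), dtype=np.float32)
C = spmm(A, B)  # builders/lowering now emit f32 tensor types and memrefs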
29 changes: 11 additions & 18 deletions frontends/numpy-scipy/cometpy/MLIRGen/builders.py
@@ -144,7 +144,6 @@ def get_mlir(self, make_private=True, include_func_defs=True) -> str:
return_type = ", ".join(str(rt) for rt in self.return_types)
if len(self.return_types) != 1:
return_type = f"({return_type})"
# signature = ", ".join(f"{ var[0].replace('%','%arg_')}: {var[1].replace('tensor', 'memref')}" if 'tensor' in var[1] else f"{ var[0].replace('%', '%arg_') }: memref<1xf64>" if var[1] =="f64" else f"{ var[0]}: {var[1]}" for var in self.inputs)
signature = ", ".join(f"{ var[0]}: {var[1]}" if 'tensor' in var[1] else f"{ var[0].replac }: tensor<1xf64>" if var[1] =="f64" else f"{ var[0]}: {var[1]}" for var in self.inputs)

return needed_function_definitions + self.function_wrapper_text.render(
@@ -211,13 +210,13 @@ class TensorSumBuilder:

def __init__(self, data): # lhs, operators, tensors_shapes, label_map):
self.lhs = data["out_id"]
self.input_type = get_tensor_type("f64", data["shapes"][0], data["formats"][0])
# "tensor<{}xf64>".format("x".join(str(v) for v in data["shapes"][0]))
self.input_type = get_tensor_type(data['value_type'], data["shapes"][0], data["formats"][0])
self.output_type = data['value_type']

self.operators = "({})".format(",".join("%t"+str(v) for v in data["operands"]))

def build_op(self):
output_type = "f64"
output_type = self.output_type

return self.tensor_sum_wrapper_text.render(
lhs = self.lhs,
@@ -242,13 +241,11 @@ def __init__(self, data):# in_tensor, target, tensors_shapes, label_map, beta) :
self.formats = data["formats"]
self.tensors_shapes = data["shapes"]
self.beta = "{:e}".format(data["beta"])
self.type = data['value_type']


def build_op(self):
output_type = get_tensor_type('f64', self.tensors_shapes[-1], self.formats[-1]) #"tensor<{}xf64>".format("x".join(str(v) for v in self.tensors_shapes[-1]))
# input_type = []
# for t in self.tensors_shapes[:-1]:
# input_type.append("tensor<{}xf64>".format("x".join(str(v) for v in t)))
output_type = get_tensor_type(self.type, self.tensors_shapes[-1], self.formats[-1])

return self.set_op_wrapper_text.render(
dest = self.target,
@@ -278,9 +275,9 @@ def __init__(self, data):
self.tensors_shapes =[]
for l in data["shapes"]:
if isinstance(l, int):
self.tensors_shapes.append('f64')
self.tensors_shapes.append(data['value_type'])
else:
self.tensors_shapes.append('tensor<1xf64>')
self.tensors_shapes.append('tensor<1x{}>'.format(data['value_type']))

self.op = data["op"]

@@ -315,7 +312,6 @@ class ArithOp_Builder:
+"({{inputtype}})"
+"-> {{outputtype}}"
+ "\n" ,
# + '"ta.set_op"(%t{{dest}},%t{{dest}}) {__beta__ = {{beta}} : f64} : ({{outputtype}}, {{outputtype}}) -> ()\n',
undefined=jinja2.StrictUndefined,
)

@@ -408,15 +404,12 @@ def __init__(self, data):
if "semiring" in data:
self.semiring = data["semiring"]
self.beta = "{:e}".format(data["beta"])
self.datatype = data["value_type"]

def build_op(self):
input_type = []
for t,f in zip(self.tensors_shapes[:-1],self.formats[:-1]):
input_type.append(get_tensor_type('f64', t, f))
# if f == DENSE:
# input_type.append("tensor<{}xf64>".format("x".join(str(v) for v in t)))
# elif f == CSR:
# input_type.append("tensor<{}xf64>".format("x".join(str(v) for v in t)))
input_type.append(get_tensor_type(self.datatype, t, f))


for t in self.tensors_shapes:
@@ -425,7 +418,7 @@ def build_op(self):
input_type = ",".join(input_type)
if self.mask_shape != None:

input_type += ", " + get_tensor_type('f64', self.mask_shape, CSR)
input_type += ", " + get_tensor_type(self.datatype, self.mask_shape, CSR)
# beta_val = ArithOp_Builder.get_beta_val(self.op)

iMap = {}
@@ -458,7 +451,7 @@ def build_op(self):
indexing_map.append(temp)
indexing_maps = []

output_type = get_tensor_type('f64', [vMap[v] for v in self.op_ilabels[-1]], self.formats[-1]) #"tensor<{}xf64>".format("x".join(str(vMap[v]) for v in self.op_ilabels[-1]))
output_type = get_tensor_type(self.datatype, [vMap[v] for v in self.op_ilabels[-1]], self.formats[-1])

for imap in indexing_map:
indexing_maps.append("affine_map<({})->({})>".format(",".join(["d"+str(l) for l in range(i)]) , ",".join(["d"+str(l) for l in imap])))
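In builders.py the hard-coded 'f64' element type passed to get_tensor_type is replaced by the value_type carried in each op's data dictionary (self.type / self.datatype / self.output_type). A minimal sketch of what such a value_type-parameterized helper could look like is below; the real get_tensor_type lives elsewhere in cometpy.MLIRGen, and the format handling shown here is an assumption, not taken from this diff.

# Minimal sketch, assuming DENSE/CSR are simple format tags; the real helper
# in cometpy.MLIRGen may differ, and sparse formats would additionally carry
# a sparse-tensor encoding attribute that this sketch omits.
DENSE, CSR = "Dense", "CSR"  # placeholder tags, hypothetical values

def get_tensor_type(value_type, shape, fmt):
    dims = "x".join(str(d) for d in shape)
    base = "tensor<{}x{}>".format(dims, value_type)
    if fmt == DENSE:
        # e.g. get_tensor_type("f32", [4, 8], DENSE) -> "tensor<4x8xf32>"
        return base
    # For CSR/COO the real builder would attach a sparse encoding; omitted here.
    return base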
32 changes: 25 additions & 7 deletions frontends/numpy-scipy/cometpy/MLIRGen/lowering.py
@@ -94,7 +94,10 @@ def memref_from_np_array(np_array):
constructor = create_memref_type(ctype,len(np_array.shape))
elif np_array.dtype == 'float32':
ctype = c_float
constructor = create_memref_type(ctype,len(np_array.shape))
if(len(np_array.shape) == 1):
constructor = memref_f32
else:
constructor = create_memref_type(ctype,len(np_array.shape))
elif np_array.dtype == 'float64':
ctype = c_double
if(len(np_array.shape) == 1):
@@ -105,12 +108,18 @@ def memref_from_np_array(np_array):

# llvm_args += [*expand_memref_ptr(dim_sizes), 0, *expand_memref_ptr(A1pos), 0, *expand_memref_ptr(A2pos), *expand_memref_ptr(A2crd), *expand_memref_ptr(Aval)]

class output_csr(Structure):
class output_csr_f64(Structure):
_fields_ = [('dims_sizes', memref_i64), ('insert_1', c_longlong), ('A1pos', memref_i64), ('insert_2', c_longlong), ('A2pos', memref_i64), ('A2crd', memref_i64), ('Aval', memref_f64)]

class output_csr_f32(Structure):
_fields_ = [('dims_sizes', memref_i64), ('insert_1', c_longlong), ('A1pos', memref_i64), ('insert_2', c_longlong), ('A2pos', memref_i64), ('A2crd', memref_i64), ('Aval', memref_f32)]

class output_coo(Structure):
class output_coo_f64(Structure):
_fields_ = [('dims_sizes', memref_i64), ('insert_1', c_longlong), ('A1pos', memref_i64), ('A1crd', memref_i64), ('insert_2', c_longlong), ('A2crd', memref_i64), ('Aval', memref_f64)]

class output_coo_f32(Structure):
_fields_ = [('dims_sizes', memref_i64), ('insert_1', c_longlong), ('A1pos', memref_i64), ('A1crd', memref_i64), ('insert_2', c_longlong), ('A2crd', memref_i64), ('Aval', memref_f32)]

def np_array_to_memref(np_array):
ctype = ctypes.c_longlong
if np_array.dtype == 'int32':
@@ -276,9 +285,13 @@ def generate_llvm_args_from_ndarrays(num_in, *ndargs):
A2pos, A2pos_type = memref_from_np_array(A2pos_temp)
A2crd, A2crd_type = memref_from_np_array(A2crd_temp)

out = output_csr(dim_sizes, 0, A1pos, 0, A2pos, A2crd, Aval)
if ndarray.data.dtype == 'float64':
out = output_csr_f64(dim_sizes, 0, A1pos, 0, A2pos, A2crd, Aval)
llvm_args_types = [POINTER(output_csr_f64)] + llvm_args_types
elif ndarray.data.dtype == 'float32':
out = output_csr_f32(dim_sizes, 0, A1pos, 0, A2pos, A2crd, Aval)
llvm_args_types = [POINTER(output_csr_f32)] + llvm_args_types
llvm_args = [out] + llvm_args
llvm_args_types = [POINTER(output_csr)] + llvm_args_types
all_outputs.append(out)
elif ndarray.format == 'coo':
A1pos_temp = np.array([0, ndarray.nnz], dtype=np.int64)
@@ -291,9 +304,14 @@ def generate_llvm_args_from_ndarrays(num_in, *ndargs):
A1crd, A1crd_type = memref_from_np_array(A1crd_temp)
A2crd, A2crd_type = memref_from_np_array(A2crd_temp)

out = output_coo(dim_sizes, 0, A1pos, A1crd, 0, A2crd, Aval)
if ndarray.data.dtype == 'float64':
out = output_coo_f64(dim_sizes, 0, A1pos, A1crd, 0, A2crd, Aval)
llvm_args_types = [POINTER(output_coo_f64)] + llvm_args_types
elif ndarray.data.dtype == 'float32':
out = output_coo_f32(dim_sizes, 0, A1pos, A1crd, 0, A2crd, Aval)
llvm_args_types = [POINTER(output_coo_f32)] + llvm_args_types

llvm_args = [out] + llvm_args
llvm_args_types = [POINTER(output_coo)] + llvm_args_types
all_outputs.append(out)
else:

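lowering.py now pairs each sparse output struct with the element type of the ndarray (output_csr_f32/output_coo_f32 alongside the f64 variants) and selects the matching rank-1 memref constructor for float32 data. A sketch of the 1-D f32 memref descriptor those structs refer to is below; the actual memref_f32 definition sits elsewhere in lowering.py, so the field names here are assumptions modeled on the standard MLIR memref C ABI (allocated pointer, aligned pointer, offset, size, stride).

from ctypes import Structure, POINTER, c_float, c_longlong

# Sketch only: assumed layout of a rank-1 f32 memref descriptor mirroring the
# MLIR C ABI. Field names are hypothetical; only the overall shape of the
# struct (two data pointers plus offset/size/stride) is implied by how the
# Aval memref is passed into output_csr_f32 / output_coo_f32.
class memref_f32(Structure):
    _fields_ = [
        ("allocated", POINTER(c_float)),  # base allocation pointer
        ("aligned", POINTER(c_float)),    # aligned data pointer
        ("offset", c_longlong),
        ("size", c_longlong),             # number of elements
        ("stride", c_longlong),
    ]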