Skip to content

Commit

Permalink
[Draft][tflchef/circlechef] Use a dedicated type for custom operators
Browse files Browse the repository at this point in the history
Ongoing draft to use a dedicated type for custom operators in the recipe.

ONE-DCO-1.0-Signed-off-by: SeungHui Lee <[email protected]>
  • Loading branch information
Seunghui98 committed Oct 23, 2023
1 parent 9dcd0be commit f9cf248
Show file tree
Hide file tree
Showing 28 changed files with 75 additions and 49 deletions.
25 changes: 14 additions & 11 deletions compiler/circlechef/core/src/ModelChef.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -135,10 +135,11 @@ gather_builtincode_map(const ::circlechef::ModelRecipe &model_recipe)

for (const auto &operation : model_recipe.operation())
{
auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
if (op_chef->code() == circle::BuiltinOperator_CUSTOM)
if (operation.type() == "Custom")
continue;

auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);

// Various operation version is unified as the highest version among them
if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
builtin_map[op_chef->code()] < operation.version())
Expand All @@ -151,10 +152,10 @@ gather_builtincode_map(const ::circlechef::ModelRecipe &model_recipe)
const auto &graph = model_recipe.graph(g);
for (const auto &operation : graph.operation())
{
auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
if (op_chef->code() == circle::BuiltinOperator_CUSTOM)
if (operation.type() == "Custom")
continue;

auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
// Various operation version is unified as the highest version among them
if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
builtin_map[op_chef->code()] < operation.version())
Expand All @@ -171,9 +172,8 @@ std::set<std::string> gather_customcode_set(const ::circlechef::ModelRecipe &mod
std::set<std::string> customcode_set;
for (const auto &operation : model_recipe.operation())
{
auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
if (op_chef->code() == circle::BuiltinOperator_CUSTOM)
customcode_set.insert(operation.type());
if (operation.type() == "Custom")
customcode_set.insert(operation.custom_code());
}

// Add ops used in Graphs(subgraphs)
Expand All @@ -182,9 +182,8 @@ std::set<std::string> gather_customcode_set(const ::circlechef::ModelRecipe &mod
const auto &graph = model_recipe.graph(g);
for (const auto &operation : graph.operation())
{
auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
if (op_chef->code() == circle::BuiltinOperator_CUSTOM)
customcode_set.insert(operation.type());
if (operation.type() == "Custom")
customcode_set.insert(operation.custom_code());
}
}

Expand Down Expand Up @@ -418,7 +417,11 @@ template <typename T> void cook_graph(const T &graph, CookParams &cp)
{
assert(operation.has_type());

auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
std::string op_type = operation.type();
if (op_type == "Custom")
op_type = operation.custom_code();

auto op_chef = op_chef_registry().lookup(op_type).create(&operation);

// Create 'inputs'
std::vector<int32_t> input_vec = as_dataset(operation.input()).map(lookup).vectorize();
Expand Down
1 change: 1 addition & 0 deletions compiler/circlechef/proto/circlechef.proto
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ message Operation {
repeated string input = 2;
repeated string output = 3;
optional int32 version = 4 [default = 1];
optional string custom_code = 5;

optional BatchMatMulOptions batch_matmul_options = 100;
optional InstanceNormOptions instance_norm_options = 101;
Expand Down
2 changes: 1 addition & 1 deletion compiler/tflchef/core/src/CustomOp/AddV2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ AddV2Chef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
{
auto &operation = (*_operation);

assert(operation.type() == "AddV2");
assert(operation.custom_code() == "AddV2");

/**
* REGISTER_OP("AddV2")
Expand Down
2 changes: 1 addition & 1 deletion compiler/tflchef/core/src/CustomOp/All.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ AllChef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
{
auto &operation = (*_operation);

assert(operation.type() == "All");
assert(operation.custom_code() == "All");

/**
* REGISTER_OP("All")
Expand Down
2 changes: 1 addition & 1 deletion compiler/tflchef/core/src/CustomOp/BatchMatMulV2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ BatchMatMulV2Chef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
{
auto &operation = (*_operation);

assert(operation.type() == "BatchMatMulV2");
assert(operation.custom_code() == "BatchMatMulV2");

/**
* REGISTER_OP("BatchMatMulV2")
Expand Down
2 changes: 1 addition & 1 deletion compiler/tflchef/core/src/CustomOp/BroadcastTo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ BroadcastToChef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
{
auto &operation = (*_operation);

assert(operation.type() == "BroadcastTo");
assert(operation.custom_code() == "BroadcastTo");

/**
* REGISTER_OP("BroadcastTo")
Expand Down
2 changes: 1 addition & 1 deletion compiler/tflchef/core/src/CustomOp/Erf.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ ErfChef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
{
auto &operation = (*_operation);

assert(operation.type() == "Erf");
assert(operation.custom_code() == "Erf");

/**
* REGISTER_OP("Erf")
Expand Down
2 changes: 1 addition & 1 deletion compiler/tflchef/core/src/CustomOp/MatMul.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ MatMulChef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
{
auto &operation = (*_operation);

assert(operation.type() == "MatMul");
assert(operation.custom_code() == "MatMul");

/**
* REGISTER_OP("MatMul")
Expand Down
2 changes: 1 addition & 1 deletion compiler/tflchef/core/src/CustomOp/MatrixBandPart.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ MatrixBandPartChef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
{
auto &operation = (*_operation);

assert(operation.type() == "MatrixBandPart");
assert(operation.custom_code() == "MatrixBandPart");

/**
* REGISTER_OP("MatrixBandPart")
Expand Down
2 changes: 1 addition & 1 deletion compiler/tflchef/core/src/CustomOp/MaxPoolWithArgmax.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ MaxPoolWithArgmaxChef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const
{
auto &operation = (*_operation);

assert(operation.type() == "MaxPoolWithArgmax");
assert(operation.custom_code() == "MaxPoolWithArgmax");

/**
* REGISTER_OP("MaxPoolWithArgmax")
Expand Down
27 changes: 15 additions & 12 deletions compiler/tflchef/core/src/ModelChef.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -141,10 +141,10 @@ gather_builtincode_map(const ::tflchef::ModelRecipe &model_recipe)

for (const auto &operation : model_recipe.operation())
{
auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
if (op_chef->code() == tflite::BuiltinOperator_CUSTOM)
if (operation.type() == "Custom")
continue;

auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
// Various operation version is unified as the highest version among them
if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
builtin_map[op_chef->code()] < operation.version())
Expand All @@ -157,10 +157,10 @@ gather_builtincode_map(const ::tflchef::ModelRecipe &model_recipe)
const auto &graph = model_recipe.graph(g);
for (const auto &operation : graph.operation())
{
auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
if (op_chef->code() == tflite::BuiltinOperator_CUSTOM)
if (operation.type() == "Custom")
continue;

auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
// Various operation version is unified as the highest version among them
if (builtin_map.find(op_chef->code()) == builtin_map.end() ||
builtin_map[op_chef->code()] < operation.version())
Expand All @@ -177,9 +177,8 @@ std::set<std::string> gather_customcode_set(const ::tflchef::ModelRecipe &model_
std::set<std::string> customcode_set;
for (const auto &operation : model_recipe.operation())
{
auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
if (op_chef->code() == tflite::BuiltinOperator_CUSTOM)
customcode_set.insert(operation.type());
if (operation.type() == "Custom")
customcode_set.insert(operation.custom_code());
}

// Add ops used in Graphs(subgraphs)
Expand All @@ -188,9 +187,8 @@ std::set<std::string> gather_customcode_set(const ::tflchef::ModelRecipe &model_
const auto &graph = model_recipe.graph(g);
for (const auto &operation : graph.operation())
{
auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
if (op_chef->code() == tflite::BuiltinOperator_CUSTOM)
customcode_set.insert(operation.type());
if (operation.type() == "Custom")
customcode_set.insert(operation.custom_code());
}
}

Expand Down Expand Up @@ -619,7 +617,11 @@ template <typename T> std::map<std::string, int32_t> cook_graph(const T &graph,
{
assert(operation.has_type());

auto op_chef = op_chef_registry().lookup(operation.type()).create(&operation);
std::string op_type = operation.type();
if (op_type == "Custom")
op_type = operation.custom_code();

auto op_chef = op_chef_registry().lookup(op_type).create(&operation);

// Create 'inputs'
std::vector<int32_t> input_vec = as_dataset(operation.input()).map(lookup).vectorize();
Expand Down Expand Up @@ -650,7 +652,8 @@ template <typename T> std::map<std::string, int32_t> cook_graph(const T &graph,
// custom operator
else
{
auto op_it = std::find(custom_code_vec.begin(), custom_code_vec.end(), operation.type());
auto op_it =
std::find(custom_code_vec.begin(), custom_code_vec.end(), operation.custom_code());
assert(op_it != custom_code_vec.end());
opcode_index = builtin_code_map.size();
opcode_index += std::distance(custom_code_vec.begin(), op_it);
Expand Down
1 change: 1 addition & 0 deletions compiler/tflchef/proto/tflchef.proto
Original file line number Diff line number Diff line change
Expand Up @@ -556,6 +556,7 @@ message Operation {
repeated string input = 2;
repeated string output = 3;
optional int32 version = 4 [default = 1];
optional string custom_code = 5;

optional Conv2DOptions conv2d_options = 100;
optional Pool2DOptions averagepool2d_options = 101;
Expand Down
3 changes: 2 additions & 1 deletion compiler/tflchef/tests/custom_erf/test.recipe
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,10 @@ operand {
shape { dim: 1 dim: 3 dim: 3 dim: 2 }
}
operation {
type: "Erf"
type: "Custom"
input: "ifm"
output: "ofm"
custom_code: "Erf"
}
input: "ifm"
output: "ofm"
3 changes: 2 additions & 1 deletion res/TensorFlowLiteRecipes/All_000/test.recipe
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,14 @@ operand {
}
}
operation {
type: "All"
type: "Custom"
all_options {
keep_dims: false
}
input: "ifm"
input: "All/reduction_indices"
output: "ofm"
custom_code: "All"
}
input: "ifm"
output: "ofm"
3 changes: 2 additions & 1 deletion res/TensorFlowLiteRecipes/BatchMatMulV2_000/test.recipe
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,11 @@ operand {
shape { dim: 1 dim: 2 dim: 4 dim: 4 }
}
operation {
type: "BatchMatMulV2"
type: "Custom"
input: "ifm1"
input: "ifm2"
output: "ofm"
custom_code: "BatchMatMulV2"
}
input: "ifm1"
input: "ifm2"
Expand Down
3 changes: 2 additions & 1 deletion res/TensorFlowLiteRecipes/BatchMatMulV2_001/test.recipe
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,14 @@ operand {
shape { dim: 2 dim: 2 dim: 2 dim: 4 }
}
operation {
type: "BatchMatMulV2"
type: "Custom"
batch_matmul_options {
adj_x : true
}
input: "ifm1"
input: "ifm2"
output: "ofm"
custom_code: "BatchMatMulV2"
}
input: "ifm1"
input: "ifm2"
Expand Down
3 changes: 2 additions & 1 deletion res/TensorFlowLiteRecipes/BroadcastTo_000/test.recipe
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,11 @@ operand {
shape { dim: 1 dim: 2 dim: 3 }
}
operation {
type: "BroadcastTo"
type: "Custom"
input: "bc_input"
input: "bc_shape"
output: "bc_ofm"
custom_code: "BroadcastTo"
}
input: "bc_input"
output: "bc_ofm"
3 changes: 2 additions & 1 deletion res/TensorFlowLiteRecipes/MatMul_000/test.recipe
Original file line number Diff line number Diff line change
Expand Up @@ -14,14 +14,15 @@ operand {
shape { dim: 3 dim: 3 }
}
operation {
type: "MatMul"
type: "Custom"
input: "ifm1"
input: "ifm2"
output: "ofm"
matmul_options {
transpose_a: true
transpose_b: false
}
custom_code: "MatMul"
}
input: "ifm1"
input: "ifm2"
Expand Down
3 changes: 2 additions & 1 deletion res/TensorFlowLiteRecipes/MatrixBandPart_000/test.recipe
Original file line number Diff line number Diff line change
Expand Up @@ -27,11 +27,12 @@ operand {
shape { dim: 4 dim: 4 }
}
operation {
type: "MatrixBandPart"
type: "Custom"
input: "ifm"
input: "MatrixBandPart/num_lower"
input: "MatrixBandPart/num_upper"
output: "ofm"
custom_code: "MatrixBandPart"
}
input: "ifm"
output: "ofm"
3 changes: 2 additions & 1 deletion res/TensorFlowLiteRecipes/MaxPoolWithArgmax_000/test.recipe
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ operand {
shape { dim: 1 dim: 9 dim: 9 dim: 1 }
}
operation {
type: "MaxPoolWithArgmax"
type: "Custom"
input: "ifm"
output: "ofm"
output: "argmax"
Expand All @@ -27,6 +27,7 @@ operation {
output_type: INT64
include_batch_in_index: false
}
custom_code: "MaxPoolWithArgmax"
}
input: "ifm"
output: "ofm"
Expand Down
3 changes: 2 additions & 1 deletion res/TensorFlowLiteRecipes/MaxPoolWithArgmax_001/test.recipe
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ operand {
shape { dim: 1 dim: 9 dim: 9 dim: 1 }
}
operation {
type: "MaxPoolWithArgmax"
type: "Custom"
input: "ifm"
output: "ofm"
output: "argmax"
Expand All @@ -27,6 +27,7 @@ operation {
output_type: INT32
include_batch_in_index: false
}
custom_code: "MaxPoolWithArgmax"
}
input: "ifm"
output: "ofm"
Expand Down
3 changes: 2 additions & 1 deletion res/TensorFlowLiteRecipes/MaxPoolWithArgmax_002/test.recipe
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ operand {
shape { dim: 1 dim: 8 dim: 8 dim: 2 }
}
operation {
type: "MaxPoolWithArgmax"
type: "Custom"
input: "ifm"
output: "ofm"
output: "argmax"
Expand All @@ -27,6 +27,7 @@ operation {
output_type: INT64
include_batch_in_index: false
}
custom_code: "MaxPoolWithArgmax"
}
input: "ifm"
output: "ofm"
Expand Down
Loading

0 comments on commit f9cf248

Please sign in to comment.