diff --git a/compiler/circle-quantizer/onnx_name.cfg b/compiler/circle-quantizer/onnx_name.cfg new file mode 100644 index 00000000000..4bfe5a86318 --- /dev/null +++ b/compiler/circle-quantizer/onnx_name.cfg @@ -0,0 +1,19 @@ +[onecc] +one-import-onnx=True +one-optimize=True +one-quantize=True +include=O1 + +[one-import-onnx] +input_path=onnx_conv2d_conv2d.onnx +output_path=onnx_conv2d_conv2d.circle + +[one-optimize] +input_path=onnx_conv2d_conv2d.circle +output_path=onnx_conv2d_conv2d.opt.circle +convert_nchw_to_nhwc=True + +[one-quantize] +input_path=onnx_conv2d_conv2d.opt.circle +output_path=onnx_conv2d_conv2d.opt.qm.circle +quant_config=onnx_name.qconfig.json diff --git a/compiler/circle-quantizer/onnx_name.qconfig.json b/compiler/circle-quantizer/onnx_name.qconfig.json new file mode 100644 index 00000000000..e686edacb6c --- /dev/null +++ b/compiler/circle-quantizer/onnx_name.qconfig.json @@ -0,0 +1,21 @@ +{ + "default_quantization_dtype" : "uint8", + "default_granularity" : "layer", + "layers" : [ + { + "names" : [ + "onnx_tf_prefix_Relu_3;Add_1;convolution_1;Const_3" + ], + "alternate": [ + { + "onnx_tf_prefix_Relu_3;Add_1;convolution_1;Const_3": + [ + "onnx_tf_prefix_Relu_3;Add_1;convolution_1;Const" + ] + } + ], + "dtype" : "int16", + "granularity" : "channel" + } + ] +} diff --git a/compiler/circle-quantizer/src/CircleQuantizer.cpp b/compiler/circle-quantizer/src/CircleQuantizer.cpp index 02b96f91e46..6c3184b7561 100644 --- a/compiler/circle-quantizer/src/CircleQuantizer.cpp +++ b/compiler/circle-quantizer/src/CircleQuantizer.cpp @@ -84,6 +84,39 @@ std::vector> read_layer_params(std::string &filename } } + // alternate names + for (auto layer : layers) + { + const std::string key_alt_names = "alternate"; + if (layer.isMember(key_alt_names)) + { + auto alternate = layer[key_alt_names][0]; + for (auto altkey : alternate.getMemberNames()) + { + bool found = false; + for (auto &l : p) + { + std::cout << "!!! 
altkey: " << altkey << std::endl; + if (l->name == altkey) + { + std::cout << "!!! found" << std::endl; + found = true; + for (auto altvalue : alternate[altkey]) + { + std::cout << "!!! altvalue: " << altvalue << std::endl; + l->altnames.push_back(altvalue.asString()); + } + break; + } + } + if (!found) + { + throw std::runtime_error("'" + key_alt_names + "' item not found for '" + altkey + "'"); + } + } + } + } + return p; } diff --git a/compiler/luci/pass/include/luci/CircleQuantizer.h b/compiler/luci/pass/include/luci/CircleQuantizer.h index 463f317903d..6053442af50 100644 --- a/compiler/luci/pass/include/luci/CircleQuantizer.h +++ b/compiler/luci/pass/include/luci/CircleQuantizer.h @@ -35,6 +35,7 @@ class CircleQuantizer final std::string name; std::string dtype; std::string granularity; + std::vector altnames; }; enum Algorithm diff --git a/compiler/one-cmds/one-prepare-venv.tf280 b/compiler/one-cmds/one-prepare-venv.tf280 new file mode 100644 index 00000000000..baa88bede82 --- /dev/null +++ b/compiler/one-cmds/one-prepare-venv.tf280 @@ -0,0 +1,100 @@ +#!/bin/bash + +# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +DRIVER_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +VENV_ACTIVATE=${DRIVER_PATH}/venv/bin/activate +# NOTE please use venv's python instead of python after `source activation`. +# This script is called by debian maintainer script, i.e. `postinst`. 
+# Since debian maintainer script is called with sudo, `source activation` is ignored. +VENV_PYTHON=${DRIVER_PATH}/venv/bin/python + +if [ ! -f ${VENV_ACTIVATE} ]; then + # Create python virtual environment + python3.8 -m venv "${DRIVER_PATH}/venv" +fi + +# NOTE version +# - https://github.com/onnx/onnx/blob/master/docs/Versioning.md +# - https://github.com/onnx/onnx-tensorflow/blob/master/Versioning.md + +VER_TENSORFLOW=2.8.0 +VER_ONNX=1.11.0 +VER_ONNXRUNTIME=1.11.0 +VER_ONNX_TF=1.10.0 +VER_PYDOT=1.4.2 + +# Install tensorflow + +PIP_TRUSTED_HOST="--trusted-host pypi.org " +PIP_TRUSTED_HOST+="--trusted-host pypi.python.org " +PIP_TRUSTED_HOST+="--trusted-host files.pythonhosted.org " +PIP_TRUSTED_HOST+="--trusted-host download.pytorch.org " + +PIP_TIMEOUT="--default-timeout=1000 " + +PIP_OPTIONS="${PIP_TIMEOUT} ${PIP_TRUSTED_HOST}" + +# NOTE $ONE_PREPVENV_PIP_OPTION is to provide additional PIP options +# such as certificate file behind firewall +# ex) ONE_PREPVENV_PIP_OPTION="--cert SomePrivateCertificate.crt" ./one-prepare-venv +if [[ ! -z "$ONE_PREPVENV_PIP_OPTION" ]]; then + PIP_OPTIONS+=" ${ONE_PREPVENV_PIP_OPTION} " +fi + +${VENV_PYTHON} -m pip ${PIP_OPTIONS} install --upgrade pip setuptools +if [ -n "${EXT_TENSORFLOW_WHL}" ]; then + ${VENV_PYTHON} -m pip ${PIP_OPTIONS} install ${EXT_TENSORFLOW_WHL} +else + ${VENV_PYTHON} -m pip ${PIP_OPTIONS} install tensorflow-cpu==${VER_TENSORFLOW} +fi +${VENV_PYTHON} -m pip ${PIP_OPTIONS} install Pillow +# TODO remove version fix, https://github.com/Samsung/ONE/issues/9240 +${VENV_PYTHON} -m pip ${PIP_OPTIONS} install tensorflow_probability==0.16.0 +# TODO remove version fix, https://github.com/Samsung/ONE/issues/10481 +${VENV_PYTHON} -m pip ${PIP_OPTIONS} install tensorflow_addons==0.16.1 + +# Install PyTorch and ONNX related +# NOTE set ONE_PREPVENV_TORCH_STABLE to override 'torch_stable.html' URL. 
+# torch_stable.html points to download URL of torch wheel file(s) +# but sometimes the server gets unstable, especially from in-house CI. +TORCH_STABLE_URL="https://download.pytorch.org/whl/torch_stable.html" +if [[ ! -z "$ONE_PREPVENV_TORCH_STABLE" ]]; then + TORCH_STABLE_URL="${ONE_PREPVENV_TORCH_STABLE}" +fi +# TODO remove torch message +echo "Torch from '${ONE_PREPVENV_TORCH_STABLE}' -> '${TORCH_STABLE_URL}'" +${VENV_PYTHON} -m pip ${PIP_OPTIONS} install torch==1.11.0+cpu -f ${TORCH_STABLE_URL} + +${VENV_PYTHON} -m pip ${PIP_OPTIONS} install onnx==${VER_ONNX} + +${VENV_PYTHON} -m pip ${PIP_OPTIONS} install onnxruntime==${VER_ONNXRUNTIME} + +# Provide install of custom onnx-tf +if [ -n "${EXT_ONNX_TF_WHL}" ]; then + ${VENV_PYTHON} -m pip ${PIP_OPTIONS} install ${EXT_ONNX_TF_WHL} +else + ${VENV_PYTHON} -m pip ${PIP_OPTIONS} install onnx-tf==${VER_ONNX_TF} +fi + +# NOTE refer https://github.com/protocolbuffers/protobuf/issues/10051 +# TODO remove this when issue is resolved +${VENV_PYTHON} -m pip ${PIP_OPTIONS} install --upgrade protobuf==3.20.1 + +# Install pydot for visq +${VENV_PYTHON} -m pip ${PIP_OPTIONS} install pydot==${VER_PYDOT}