diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2815c62ea..af37ae2fd 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -13,8 +13,8 @@ jobs:
     name: Format
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
         with:
           python-version: "3.10"
       - uses: pre-commit/action@v3.0.0
@@ -27,13 +27,13 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: [ "3.8", "3.9", "3.10"]
+        python-version: [ "3.8", "3.10", "3.11"]
         runs-on: [ubuntu-latest, macos-latest, windows-latest]

     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

-      - uses: actions/setup-python@v4
+      - uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
@@ -62,48 +62,37 @@ jobs:
       - name: Install most optional dependencies
         run: |
-          python -m pip install .[all_except_psyneulink]
+          python -m pip install .[optional]

       - name: Version info for optional installed packages
         run: |
           pip list

       - name: Install graphviz
-        if: ${{ matrix.runs-on != 'windows-latest' }}
-        run: |
-          if [[ ${{ matrix.runs-on }} == *"macos"* ]]; then brew install graphviz ; fi
-          if [[ ${{ matrix.runs-on }} == *"ubuntu"* ]]; then sudo apt install graphviz ; fi
+        uses: ts-graphviz/setup-graphviz@v1

       - name: Test interface ACT-R
-        if: ${{ matrix.python-version != '3.10' || matrix.runs-on != 'windows-latest' }}
         run: |
           python -m pytest -v -m "actr" tests/

       - name: Test interface PyTorch
-        if: ${{ matrix.python-version != '3.10' || matrix.runs-on != 'windows-latest' }}
         run: |
           python -m pytest -v -m "pytorch" tests/

       - name: Test interface NeuroML
-        if: ${{ matrix.python-version != '3.10' || matrix.runs-on != 'windows-latest' }}
         run: |
+          python -m pip install .[neuroml]
           python -m pytest -v -m "neuroml" tests/

-      - name: Test interface TensorFlow linux/mac
-        if: ${{ matrix.runs-on != 'windows-latest' }}
-        run: |
-          dot -V
-          python -m pytest -v -m "tensorflow" tests/
-
-      - name: Test interface TensorFlow windows
-        if: ${{ matrix.python-version != '3.10' && matrix.runs-on == 'windows-latest' }}
+      - name: Test interface TensorFlow
+        if: ${{ matrix.python-version != '3.11'}}
         run: |
-          choco install graphviz
+          python -m pip install .[tensorflow]
           dot -V
           python -m pytest -v -m "tensorflow" tests/

       - name: Test interface PsyNeuLink
-        if: ${{ matrix.python-version != '3.10' }}
+        if: ${{ matrix.python-version != '3.11'}}
         run: |
           python -m pip install .[psyneulink]
           python -m pytest -v -m "psyneulink" tests/
@@ -125,7 +114,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: Build sdist and wheel
         run: pipx run --spec build pyproject-build
diff --git a/.gitignore b/.gitignore
index 2a845f1cc..68d5d3518 100644
--- a/.gitignore
+++ b/.gitignore
@@ -307,3 +307,4 @@ examples/TensorFlow/Keras/keras_to_MDF
 /examples/TensorFlow/Keras/MNIST/keras_to_MDF
 /examples/TensorFlow/Keras/MNIST/keras_to_MDF.1
 /examples/TensorFlow/Keras/IRIS/keras_to_MDF.1
+/checkout_pngs.sh
diff --git a/docs/MDF_function_specifications.json b/docs/MDF_function_specifications.json
index 73969fc76..af079e181 100644
--- a/docs/MDF_function_specifications.json
+++ b/docs/MDF_function_specifications.json
@@ -20,28 +20,28 @@
             "pattern",
             "curr_goal"
         ],
-        "expression_string": "change_goal(pattern,curr_goal)"
+        "expression_string": "actr.change_goal(pattern,curr_goal)"
     },
     "check_termination": {
         "description": "Function used to check if no production was selected.",
         "arguments": [
             "production"
        ],
-
"expression_string": "check_termination(production)" + "expression_string": "actr.check_termination(production)" }, "chunk_to_string": { "description": "Converts a chunk dictionary to a string format.", "arguments": [ "chunk" ], - "expression_string": "chunk_to_string(chunk)" + "expression_string": "actr.chunk_to_string(chunk)" }, "conflict_resolution_function": { "description": "ACT-R conflict resolution function. Currently selects a production at random from the already matched productions, since utility values and learning\nare not implemented yet.", "arguments": [ "productions" ], - "expression_string": "conflict_resolution_function(productions)" + "expression_string": "actr.conflict_resolution_function(productions)" }, "cos": { "description": "Cosine function", @@ -69,7 +69,7 @@ "noise", "dt" ], - "expression_string": "drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt)" + "expression_string": "ddm.drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt)" }, "exponential": { "description": "Exponential function", @@ -107,7 +107,7 @@ "production", "context" ], - "expression_string": "match_production(production,context)" + "expression_string": "actr.match_production(production,context)" }, "onnx::Abs": { "description": "\nAbsolute takes one input data (Tensor) and produces one output data\n(Tensor) where the absolute is, y = abs(x), is applied to\nthe tensor elementwise.\n", @@ -190,14 +190,14 @@ "expression_string": "onnx_ops.atanh(input)" }, "onnx::AveragePool": { - "description": "\n AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).\n ", + "description": "\n AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. 
The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).\n ", "arguments": [ "X" ], "expression_string": "onnx_ops.averagepool(X, auto_pad, ceil_mode, count_include_pad, kernel_shape, pads, strides)" }, "onnx::BatchNormalization": { - "description": "\nCarries out batch normalization as described in the paper\nhttps://arxiv.org/abs/1502.03167. Depending on the mode it is being run,\nThere are five required inputs 'X', 'scale', 'B', 'input_mean' and\n'input_var'.\nNote that 'input_mean' and 'input_var' are expected to be the estimated\nstatistics in inference mode (training_mode=False, default),\nand the running statistics in training mode (training_mode=True).\nThere are multiple cases for the number of outputs, which we list below:\n\nOutput case #1: Y, running_mean, running_var (training_mode=True)\nOutput case #2: Y (training_mode=False)\n\nWhen training_mode=False, extra outputs are invalid.\nThe outputs are updated as follows when training_mode=True:\n```\nrunning_mean = input_mean * momentum + current_mean * (1 - momentum)\nrunning_var = input_var * momentum + current_var * (1 - momentum)\n\nY = (X - current_mean) / sqrt(current_var + epsilon) * scale + B\n\nwhere:\n\ncurrent_mean = ReduceMean(X, axis=all_except_channel_index)\ncurrent_var = ReduceVar(X, axis=all_except_channel_index)\n\nNotice that ReduceVar refers to the population variance, and it equals to\nsum(sqrd(x_i - x_avg)) / N\nwhere N is the population size (this formula does not use sample size N - 1).\n\n```\n\nThe computation of ReduceMean and ReduceVar uses float to avoid overflow for float16 inputs.\n\nWhen training_mode=False:\n```\nY = (X - input_mean) / sqrt(input_var + epsilon) * scale + B\n```\n\nFor previous (depreciated) non-spatial cases, implementors are suggested\nto flatten the input shape to (N x C * D1 * D2 * ... * Dn) before a BatchNormalization Op.\nThis operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "description": "\nCarries out batch normalization as described in the paper\nhttps://arxiv.org/abs/1502.03167. 
Depending on the mode it is being run,\nThere are five required inputs 'X', 'scale', 'B', 'input_mean' and\n'input_var'.\nNote that 'input_mean' and 'input_var' are expected to be the estimated\nstatistics in inference mode (training_mode=False, default),\nand the running statistics in training mode (training_mode=True).\nThere are multiple cases for the number of outputs, which we list below:\n\n* Output case #1: Y, running_mean, running_var (training_mode=True)\n* Output case #2: Y (training_mode=False)\n\nWhen training_mode=False, extra outputs are invalid.\nThe outputs are updated as follows when training_mode=True:\n```\nrunning_mean = input_mean * momentum + current_mean * (1 - momentum)\nrunning_var = input_var * momentum + current_var * (1 - momentum)\n\nY = (X - current_mean) / sqrt(current_var + epsilon) * scale + B\n```\nwhere:\n```\ncurrent_mean = ReduceMean(X, axis=all_except_channel_index)\ncurrent_var = ReduceVar(X, axis=all_except_channel_index)\n```\nNotice that `ReduceVar` refers to the population variance, and it equals to\n`sum(sqrd(x_i - x_avg)) / N`\nwhere `N` is the population size (this formula does not use sample size `N - 1`).\n\nThe computation of ReduceMean and ReduceVar uses float to avoid overflow for float16 inputs.\n\nWhen training_mode=False:\n```\nY = (X - input_mean) / sqrt(input_var + epsilon) * scale + B\n```\n\nFor previous (depreciated) non-spatial cases, implementors are suggested\nto flatten the input shape to (N x C * D1 * D2 * ... * Dn) before a BatchNormalization Op.\nThis operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", "arguments": [ "X", "scale", @@ -215,7 +215,7 @@ "expression_string": "onnx_ops.bernoulli(input, dtype, seed)" }, "onnx::BitShift": { - "description": "\nBitwise shift operator performs element-wise operation. For each input element, if the\n attribute \"direction\" is \"RIGHT\", this operator moves its binary representation toward\n the right side so that the input value is effectively decreased. If the attribute \"direction\"\n is \"LEFT\", bits of binary representation moves toward the left side, which results the\n increase of its actual value. The input X is the tensor to be shifted and another input\n Y specifies the amounts of shifting. For example, if \"direction\" is \"Right\", X is [1, 4],\n and S is [1, 1], the corresponding output Z would be [0, 2]. If \"direction\" is \"LEFT\" with\n X=[1, 2] and S=[1, 2], the corresponding output Y would be [2, 8].\n\n Because this operator supports Numpy-style broadcasting, X's and Y's shapes are\n not necessarily identical.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md).", + "description": "\nBitwise shift operator performs element-wise operation. For each input element, if the\nattribute \"direction\" is \"RIGHT\", this operator moves its binary representation toward\nthe right side so that the input value is effectively decreased. If the attribute \"direction\"\nis \"LEFT\", bits of binary representation moves toward the left side, which results the\nincrease of its actual value. The input X is the tensor to be shifted and another input\nY specifies the amounts of shifting. 
For example, if \"direction\" is \"Right\", X is [1, 4],\nand S is [1, 1], the corresponding output Z would be [0, 2]. If \"direction\" is \"LEFT\" with\nX=[1, 2] and S=[1, 2], the corresponding output Y would be [2, 8].\n\nBecause this operator supports Numpy-style broadcasting, X's and Y's shapes are\nnot necessarily identical.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md).", "arguments": [ "X", "Y" @@ -223,7 +223,7 @@ "expression_string": "onnx_ops.bitshift(X, Y, direction)" }, "onnx::Cast": { - "description": "\nThe operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\n\nCasting from string tensor in plain (e.g., \"3.14\" and \"1000\") and scientific numeric representations\n(e.g., \"1e-5\" and \"1E8\") to float types is supported. For example, converting string \"100.5\" to an integer may\nresult 100. There are some string literals reserved for special floating-point values;\n\"+INF\" (and \"INF\"), \"-INF\", and \"NaN\" are positive infinity, negative infinity, and not-a-number, respectively.\nAny string which can exactly match \"+INF\" in a case-insensitive way would be mapped to positive infinite. Similarly,\nthis case-insensitive rule is applied to \"INF\" and \"NaN\". When casting from numeric tensors\nto string tensors, plain floating-point representation (such as \"314.15926\") would be used.\nConverting non-numerical-literal string such as \"Hello World!\" is an undefined behavior. Cases\nof converting string representing floating-point arithmetic value, such as \"2.718\", to INT is an undefined behavior.\n\nConversion from a numerical type to any numerical type is always allowed.\nUser must be aware of precision loss and value change caused by range difference between two types.\nFor example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting\nan integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type.\n\nIn more detail, the conversion among numerical types should follow these rules:\n\n* Casting from floating point to:\n * floating point: +/- infinity if OOR (out of range).\n * fixed point: undefined if OOR.\n * bool: +/- 0.0 to False; all else to True.\n* Casting from fixed point to:\n * floating point: +/- infinity if OOR. (+ infinity in the case of uint)\n * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for\nsigned types). For example, 200 (int16) -> -56 (int8).\n * bool: zero to False; nonzero to True.\n* Casting from bool to:\n * floating point: `{1.0, 0.0}`.\n * fixed point: `{1, 0}`.\n * bool: no change.\n", + "description": "\nThe operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\n\nCasting from string tensor in plain (e.g., \"3.14\" and \"1000\") and scientific numeric representations\n(e.g., \"1e-5\" and \"1E8\") to float types is supported. For example, converting string \"100.5\" to an integer may\nyield result 100. 
There are some string literals reserved for special floating-point values;\n\"+INF\" (and \"INF\"), \"-INF\", and \"NaN\" are positive infinity, negative infinity, and not-a-number, respectively.\nAny string which can exactly match \"+INF\" in a case-insensitive way would be mapped to positive infinite. Similarly,\nthis case-insensitive rule is applied to \"INF\" and \"NaN\". When casting from numeric tensors\nto string tensors, plain floating-point representation (such as \"314.15926\") would be used.\nConverting non-numerical-literal string such as \"Hello World!\" is an undefined behavior. Cases\nof converting string representing floating-point arithmetic value, such as \"2.718\", to INT is an undefined behavior.\n\nConversion from a numerical type to any numerical type is always allowed.\nUser must be aware of precision loss and value change caused by range difference between two types.\nFor example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting\nan integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type.\n\nIn more detail, the conversion among numerical types should follow these rules:\n\n* Casting from floating point to:\n * floating point: +/- infinity if OOR (out of range).\n * fixed point: undefined if OOR.\n * bool: +/- 0.0 to False; all else to True.\n* Casting from fixed point to:\n * floating point: +/- infinity if OOR. (+ infinity in the case of uint)\n * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for\n signed types). For example, 200 (int16) -> -56 (int8).\n * bool: zero to False; nonzero to True.\n* Casting from bool to:\n * floating point: `{1.0, 0.0}`.\n * fixed point: `{1, 0}`.\n * bool: no change.\n", "arguments": [ "input" ], @@ -238,7 +238,7 @@ "expression_string": "onnx_ops.castlike(input, target_type)" }, "onnx::Ceil": { - "description": "\nCeil takes one input data (Tensor) and produces one output data\n(Tensor) where the ceil is, y = ceil(x), is applied to\nthe tensor elementwise.\n", + "description": "\nCeil takes one input data (Tensor) and produces one output data\n(Tensor) where the ceil is, y = ceil(x), is applied to\nthe tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is returned.\n", "arguments": [ "X" ], @@ -345,14 +345,14 @@ "expression_string": "onnx_ops.cumsum(x, axis, exclusive, reverse)" }, "onnx::DepthToSpace": { - "description": "DepthToSpace rearranges (permutes) data from depth into blocks of spatial data.\nThis is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of\nthe input tensor where values from the depth dimension are moved in spatial blocks to the height\nand width dimensions. By default, `mode` = `DCR`.\nIn the DCR mode, elements along the depth dimension from the input tensor are rearranged in the\nfollowing order: depth, column, and then row. The output y is computed from the input x as below:\n\nb, c, h, w = x.shape\n\ntmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])\n\ntmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])\n\ny = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])\n\n\nIn the CRD mode, elements along the depth dimension from the input tensor are rearranged in the\nfollowing order: column, row, and the depth. 
The output y is computed from the input x as below:\n\nb, c, h, w = x.shape\n\ntmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])\n\ntmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])\n\ny = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])\n\n", + "description": "DepthToSpace rearranges (permutes) data from depth into blocks of spatial data.\nThis is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of\nthe input tensor where values from the depth dimension are moved in spatial blocks to the height\nand width dimensions. By default, `mode` = `DCR`.\nIn the DCR mode, elements along the depth dimension from the input tensor are rearranged in the\nfollowing order: depth, column, and then row. The output y is computed from the input x as below:\n\n```\nb, c, h, w = x.shape\ntmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])\ntmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])\ny = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])\n```\n\nIn the CRD mode, elements along the depth dimension from the input tensor are rearranged in the\nfollowing order: column, row, and the depth. The output y is computed from the input x as below:\n\n```\nb, c, h, w = x.shape\ntmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])\ntmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])\ny = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])\n```\n", "arguments": [ "input" ], "expression_string": "onnx_ops.depthtospace(input, blocksize, mode)" }, "onnx::DequantizeLinear": { - "description": "\nThe linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor.\nThe dequantization formula is y = (x - x_zero_point) * x_scale. 'x_scale' and 'x_zero_point' must have same shape, and can be either a scalar\nfor per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.\n'x_zero_point' and 'x' must have same type. 'x' and 'y' must have same shape. In the case of dequantizing int32,\nthere's no zero point (zero point is supposed to be 0).\n", + "description": "\nThe linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor.\nThe dequantization formula is `y = (x - x_zero_point) * x_scale`. `x_scale` and `x_zero_point` must have same shape, and can be either a scalar\nfor per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.\n`x_zero_point` and `x` must have same type. `x` and `y` must have same shape. 
In the case of dequantizing int32,\nthere's no zero point (zero point is supposed to be 0).\n", "arguments": [ "x", "x_scale", @@ -385,14 +385,14 @@ "expression_string": "onnx_ops.dropout(data, ratio, training_mode, seed)" }, "onnx::DynamicQuantizeLinear": { - "description": "\nA Function to fuse calculation for Scale, Zero Point and FP32->8Bit convertion of FP32 Input data.\nOutputs Scale, ZeroPoint and Quantized Input for a given FP32 Input.\nScale is calculated as:\n```\n y_scale = (max(x) - min(x))/(qmax - qmin)\n * where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8\n * data range is adjusted to include 0.\n```\nZero point is calculated as:\n```\nintermediate_zero_point = qmin - min(x)/y_scale\ny_zero_point = cast(round(saturate(itermediate_zero_point)))\n* where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n```\nData quantization formula is:\n```\ny = saturate (round (x / y_scale) + y_zero_point)\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n```\n", + "description": "\nA Function to fuse calculation for Scale, Zero Point and FP32->8Bit convertion of FP32 Input data.\nOutputs Scale, ZeroPoint and Quantized Input for a given FP32 Input.\nScale is calculated as:\n```\ny_scale = (max(x) - min(x))/(qmax - qmin)\n```\n\n* where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8\n* data range is adjusted to include 0.\n\nZero point is calculated as:\n```\nintermediate_zero_point = qmin - min(x)/y_scale\ny_zero_point = cast(round(saturate(itermediate_zero_point)))\n```\n\n* where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n\nData quantization formula is:\n```\ny = saturate (round (x / y_scale) + y_zero_point)\n```\n\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n", "arguments": [ "x" ], "expression_string": "onnx_ops.dynamicquantizelinear(x)" }, "onnx::Einsum": { - "description": "\nAn einsum of the form ```term1, term2 -> output-term``` produces an output tensor using the following equation\n\n```output[output-term] = reduce-sum( input1[term1] * input2[term] )```\n\nwhere the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2)\nthat do not occur in the output-term.\n\nThe Einsum operator evaluates algebraic tensor operations on a sequence of tensors, using the Einstein summation\nconvention. The equation string contains a comma-separated sequence of lower case letters. Each term corresponds to\nan operand tensor, and the characters within the terms correspond to operands dimensions.\n\nThis sequence may be followed by \"->\" to separate the left and right hand side of the equation.\nIf the equation contains \"->\" followed by the right-hand side, the explicit (not classical) form of the Einstein\nsummation is performed, and the right-hand side indices indicate output tensor dimensions. 
In other cases,\noutput indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the\nequation.\n\nWhen a dimension character is repeated in the left-hand side, it represents summation along the dimension.\n\nThe equation may contain ellipsis (\"...\") to enable broadcasting. Ellipsis must indicate a fixed number of dimensions.\nSpecifically, every occurrence of ellipsis in the equation must represent the same number of dimensions.\nThe right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the\nbeginning of the output. The equation string may contain space (U+0020) character.\n", + "description": "\nAn einsum of the form `term1, term2 -> output-term` produces an output tensor using the following equation\n\n```\noutput[output-term] = reduce-sum( input1[term1] * input2[term] )\n```\n\nwhere the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2)\nthat do not occur in the output-term.\n\nThe Einsum operator evaluates algebraic tensor operations on a sequence of tensors, using the Einstein summation\nconvention. The equation string contains a comma-separated sequence of lower case letters. Each term corresponds to\nan operand tensor, and the characters within the terms correspond to operands dimensions.\n\nThis sequence may be followed by \"->\" to separate the left and right hand side of the equation.\nIf the equation contains \"->\" followed by the right-hand side, the explicit (not classical) form of the Einstein\nsummation is performed, and the right-hand side indices indicate output tensor dimensions. In other cases,\noutput indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the\nequation.\n\nWhen a dimension character is repeated in the left-hand side, it represents summation along the dimension.\n\nThe equation may contain ellipsis (\"...\") to enable broadcasting. Ellipsis must indicate a fixed number of dimensions.\nSpecifically, every occurrence of ellipsis in the equation must represent the same number of dimensions.\nThe right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the\nbeginning of the output. The equation string may contain space (U+0020) character.\n", "arguments": [ "Inputs" ], @@ -450,14 +450,14 @@ "expression_string": "onnx_ops.flatten(input, axis)" }, "onnx::Floor": { - "description": "\nFloor takes one input data (Tensor) and produces one output data\n(Tensor) where the floor is, y = floor(x), is applied to\nthe tensor elementwise.\n", + "description": "\nFloor takes one input data (Tensor) and produces one output data\n(Tensor) where the floor is, y = floor(x), is applied to\nthe tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is returned.\n", "arguments": [ "X" ], "expression_string": "onnx_ops.floor(X)" }, "onnx::GRU": { - "description": "\nComputes an one-layer GRU. 
This operator is usually supported via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`z` - update gate\n\n`r` - reset gate\n\n`h` - hidden gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[zrh]` - W parameter weight matrix for update, reset, and hidden gates\n\n`R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates\n\n`Wb[zrh]` - W bias vectors for update, reset, and hidden gates\n\n`Rb[zrh]` - R bias vectors for update, reset, and hidden gates\n\n`WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates\n\n`RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates\n\n`WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates\n\n`RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh):\n\n - zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)\n\n - rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)\n\n - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0\n\n - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0\n\n - Ht = (1 - zt) (.) ht + zt (.) Ht-1\nThis operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "description": "\nComputes an one-layer GRU. 
This operator is usually supported via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n* `X` - input tensor\n* `z` - update gate\n* `r` - reset gate\n* `h` - hidden gate\n* `t` - time step (t-1 means previous time step)\n* `W[zrh]` - W parameter weight matrix for update, reset, and hidden gates\n* `R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates\n* `Wb[zrh]` - W bias vectors for update, reset, and hidden gates\n* `Rb[zrh]` - R bias vectors for update, reset, and hidden gates\n* `WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates\n* `RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates\n* `WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates\n* `RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates\n* `H` - Hidden state\n* `num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n* Relu(x) - max(0, x)\n* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n* Sigmoid(x) - 1/(1 + e^{-x})\n\nNOTE:\n Below are optional\n\n* Affine(x) - alpha * x + beta\n* LeakyRelu(x) - x if x >= 0 else alpha * x\n* ThresholdedRelu(x) - x if x >= alpha else 0\n* ScaledTanh(x) - alpha * Tanh(beta * x)\n* HardSigmoid(x) - min(max(alpha * x + beta, 0), 1)\n* Elu(x) - x if x >= 0 else alpha * (e^x - 1)\n* Softsign(x) - x/(1 + |x|)\n* Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh):\n\n* zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)\n* rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)\n* ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0\n* ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0\n* Ht = (1 - zt) (.) ht + zt (.) Ht-1\nThis operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. 
Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", "arguments": [ "X", "W", @@ -469,7 +469,7 @@ "expression_string": "onnx_ops.gru(X, W, R, B, sequence_lens, initial_h, activation_alpha, activation_beta, activations, clip, direction, hidden_size, layout, linear_before_reset)" }, "onnx::Gather": { - "description": "\nGiven `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather\nentries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates\nthem in an output tensor of rank q + (r - 1).\n\naxis = 0 :\n\nLet\nk = indices[i_{0}, ..., i_{q-1}]\nThen\noutput[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]\n\n```\n data = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n indices = [\n [0, 1],\n [1, 2],\n ]\n output = [\n [\n [1.0, 1.2],\n [2.3, 3.4],\n ],\n [\n [2.3, 3.4],\n [4.5, 5.7],\n ],\n ]\n```\naxis = 1 :\n\nLet\nk = indices[i_{0}, ..., i_{q-1}]\nThen\noutput[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]\n\n```\n data = [\n [1.0, 1.2, 1.9],\n [2.3, 3.4, 3.9],\n [4.5, 5.7, 5.9],\n ]\n indices = [\n [0, 2],\n ]\n axis = 1,\n output = [\n [[1.0, 1.9]],\n [[2.3, 3.9]],\n [[4.5, 5.9]],\n ]\n```\n", + "description": "\nGiven `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather\nentries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates\nthem in an output tensor of rank q + (r - 1).\n\nIf `axis = 0`, let `k = indices[i_{0}, ..., i_{q-1}]`\nthen `output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]`:\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n]\nindices = [\n [0, 1],\n [1, 2],\n]\noutput = [\n [\n [1.0, 1.2],\n [2.3, 3.4],\n ],\n [\n [2.3, 3.4],\n [4.5, 5.7],\n ],\n]\n```\n\nIf `axis = 1`, let `k = indices[i_{0}, ..., i_{q-1}]`\nthen `output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]`:\n\n```\ndata = [\n [1.0, 1.2, 1.9],\n [2.3, 3.4, 3.9],\n [4.5, 5.7, 5.9],\n]\nindices = [\n [0, 2],\n]\naxis = 1,\noutput = [\n [[1.0, 1.9]],\n [[2.3, 3.9]],\n [[4.5, 5.9]],\n]\n```\n", "arguments": [ "data", "indices" @@ -477,7 +477,7 @@ "expression_string": "onnx_ops.gather(data, indices, axis)" }, "onnx::GatherElements": { - "description": "\n\nGatherElements takes two inputs `data` and `indices` of the same rank r >= 1\nand an optional attribute `axis` that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). It is an indexing operation\nthat produces its output by indexing into the input data tensor at index\npositions determined by elements of the `indices` tensor.\nIts output shape is the same as the shape of `indices` and consists of one value\n(gathered from the `data`) for each element in `indices`.\n\nFor instance, in the 3-D case (r = 3), the output produced is determined\nby the following equations:\n```\n out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,\n out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,\n out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,\n```\n\nThis operator is also the inverse of ScatterElements. 
It is similar to Torch's gather operation.\n\nExample 1:\n```\n data = [\n [1, 2],\n [3, 4],\n ]\n indices = [\n [0, 0],\n [1, 0],\n ]\n axis = 1\n output = [\n [1, 1],\n [4, 3],\n ]\n```\nExample 2:\n```\n data = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n ]\n indices = [\n [1, 2, 0],\n [2, 0, 0],\n ]\n axis = 0\n output = [\n [4, 8, 3],\n [7, 2, 3],\n ]\n```\n", + "description": "\n\nGatherElements takes two inputs `data` and `indices` of the same rank r >= 1\nand an optional attribute `axis` that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). It is an indexing operation\nthat produces its output by indexing into the input data tensor at index\npositions determined by elements of the `indices` tensor.\nIts output shape is the same as the shape of `indices` and consists of one value\n(gathered from the `data`) for each element in `indices`.\n\nFor instance, in the 3-D case (r = 3), the output produced is determined\nby the following equations:\n```\nout[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,\nout[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,\nout[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,\n```\n\nThis operator is also the inverse of ScatterElements. It is similar to Torch's gather operation.\n\nExample 1:\n```\ndata = [\n [1, 2],\n [3, 4],\n]\nindices = [\n [0, 0],\n [1, 0],\n]\naxis = 1\noutput = [\n [1, 1],\n [4, 3],\n]\n```\nExample 2:\n```\ndata = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n]\nindices = [\n [1, 2, 0],\n [2, 0, 0],\n]\naxis = 0\noutput = [\n [4, 8, 3],\n [7, 2, 3],\n]\n```\n", "arguments": [ "data", "indices" @@ -493,7 +493,7 @@ "expression_string": "onnx_ops.gathernd(data, indices, batch_dims)" }, "onnx::Gemm": { - "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\n\nA' = transpose(A) if transA else A\n\nB' = transpose(B) if transB else B\n\nCompute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),\ninput tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),\nand output tensor Y has shape (M, N). A will be transposed before doing the\ncomputation if attribute transA is non-zero, same for B and transB.\nThis operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](Broadcasting.md).\nThis operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\n\n* A' = transpose(A) if transA else A\n* B' = transpose(B) if transB else B\n\nCompute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),\ninput tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),\nand output tensor Y has shape (M, N). A will be transposed before doing the\ncomputation if attribute transA is non-zero, same for B and transB.\nThis operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](Broadcasting.md).\nThis operator has **optional** inputs/outputs. 
See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", "arguments": [ "A", "B", @@ -590,14 +590,14 @@ "expression_string": "onnx_ops.isnan(X)" }, "onnx::LRN": { - "description": "\nLocal Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf).\nIt normalizes over local input regions.\nThe local region is defined across the channels. For an element X[n, c, d1, ..., dk] in a tensor\nof shape (N x C x D1 x D2, ..., Dk), its region is\n{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}.\n\nsquare_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2),\nwhere max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)).\n\nY[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta\n", + "description": "\nLocal Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf).\nIt normalizes over local input regions.\nThe local region is defined across the channels. For an element `X[n, c, d1, ..., dk]` in a tensor\nof shape `(N x C x D1 x D2, ..., Dk)`, its region is\n`{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}`.\n\n`square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2)`,\nwhere `max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))`.\n\n`Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta`\n", "arguments": [ "X" ], "expression_string": "onnx_ops.lrn(X, alpha, beta, bias, size)" }, "onnx::LSTM": { - "description": "\nComputes an one-layer LSTM. 
This operator is usually supported via some\ncustom implementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`i` - input gate\n\n`o` - output gate\n\n`f` - forget gate\n\n`c` - cell gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates\n\n`R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates\n\n`Wb[iofc]` - W bias vectors for input, output, forget, and cell gates\n\n`Rb[iofc]` - R bias vectors for input, output, forget, and cell gates\n\n`P[iof]` - P peephole weight vector for input, output, and forget gates\n\n`WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates\n\n`RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates\n\n`WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates\n\n`RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates\n\n`PB[iof]` - P peephole weight vector for backward input, output, and forget gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh, h=Tanh):\n\n - it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)\n\n - ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)\n\n - ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)\n\n - Ct = ft (.) Ct-1 + it (.) ct\n\n - ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)\n\n - Ht = ot (.) h(Ct)\nThis operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "description": "\nComputes an one-layer LSTM. 
This operator is usually supported via some\ncustom implementation such as CuDNN.\n\nNotations:\n\n* `X` - input tensor\n* `i` - input gate\n* `o` - output gate\n* `f` - forget gate\n* `c` - cell gate\n* `t` - time step (t-1 means previous time step)\n* `W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates\n* `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates\n* `Wb[iofc]` - W bias vectors for input, output, forget, and cell gates\n* `Rb[iofc]` - R bias vectors for input, output, forget, and cell gates\n* `P[iof]` - P peephole weight vector for input, output, and forget gates\n* `WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates\n* `RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates\n* `WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates\n* `RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates\n* `PB[iof]` - P peephole weight vector for backward input, output, and forget gates\n* `H` - Hidden state\n* `num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n* Relu(x) - max(0, x)\n* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n* Sigmoid(x) - 1/(1 + e^{-x})\n\nNOTE: Below are optional\n\n* Affine(x) - alpha*x + beta\n* LeakyRelu(x) - x if x >= 0 else alpha * x\n* ThresholdedRelu(x) - x if x >= alpha else 0\n* ScaledTanh(x) - alpha*Tanh(beta*x)\n* HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n* Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n* Softsign(x) - x/(1 + |x|)\n* Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh, h=Tanh):\n\n* it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)\n* ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)\n* ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)\n* Ct = ft (.) Ct-1 + it (.) ct\n* ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)\n* Ht = ot (.) h(Ct)\nThis operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", "arguments": [ "X", "W", @@ -687,7 +687,7 @@ "expression_string": "onnx_ops.max(data_0)" }, "onnx::MaxPool": { - "description": "\n MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad. \n ", + "description": "\n MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled `pad_shape[i]` is the sum of pads along axis `i`.\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad. \n ", "arguments": [ "X" ], @@ -718,7 +718,7 @@ "expression_string": "onnx_ops.mean(data_0)" }, "onnx::MeanVarianceNormalization": { - "description": "\n A MeanVarianceNormalization Function: Perform mean variance normalization\n on the input tensor X using formula:
``` (X-EX)/sqrt(E(X-EX)^2) ```\n", + "description": "\n A MeanVarianceNormalization Function: Perform mean variance normalization\n on the input tensor X using formula: `(X-EX)/sqrt(E(X-EX)^2)`\n", "arguments": [ "X" ], @@ -732,7 +732,7 @@ "expression_string": "onnx_ops.min(data_0)" }, "onnx::Mod": { - "description": "\n Performs element-wise binary modulus (with Numpy-style broadcasting support).\n The sign of the remainder is the same as that of the Divisor.\n\n Mod operator can also behave like C fmod() or numpy.fmod. In this case, the sign of the remainder however, will be the same as the Dividend\n (in contrast to integer mod). To force a behavior like numpy.fmod() an 'fmod' Attribute is provided.\n This attribute is set to 0 by default causing the behavior to be like integer mod.\n Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod().\n\n If the input type is floating point, then `fmod` attribute must be set to 1.\n\n In case of dividend being zero, the results will be platform dependent.\n\n This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md).\n", + "description": "\n Performs element-wise binary modulus (with Numpy-style broadcasting support).\n The sign of the remainder is the same as that of the Divisor.\n\n Mod operator can also behave like C fmod() or numpy.fmod. In this case, the sign of the remainder however, will be the same as the Dividend\n (in contrast to integer mod). To force a behavior like numpy.fmod() an 'fmod' Attribute is provided.\n This attribute is set to 0 by default causing the behavior to be like integer mod.\n Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod().\n\n If the input type is floating point, then `fmod` attribute must be set to 1.\n\n In case of dividend being zero, the results will be platform dependent.\n\n This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md).\n", "arguments": [ "A", "B" @@ -762,7 +762,7 @@ "expression_string": "onnx_ops.neg(X)" }, "onnx::NegativeLogLikelihoodLoss": { - "description": "\nA NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss.\nIts \"input\" tensor has the shape of (N, C, d1, d2, ..., dk) where k >= 0.\nThe \"input\" tensor contains log-probabilities for input[n, :, d_1, d_2,..., d_k] being in a class of [0, C).\nThe operator's \"target\" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes)\nor it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... 
x dk samples.\nThe loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as:\n\n loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].\n\nWhen an optional \"weight\" is provided, the sample loss is calculated as:\n\n loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].\n\nloss is zero for the case when target-value equals ignore_index.\n\n loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index\n\nIf \"reduction\" attribute is set to \"none\", the operator's output will be the above loss with shape (N, d1, d2, ..., dk).\nIf \"reduction\" attribute is set to \"mean\" (the default attribute value), the output loss is (weight) averaged:\n\n mean(loss), if \"weight\" is not provided,\n\nor if weight is provided,\n\n sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.\n\nIf \"reduction\" attribute is set to \"sum\", the output is a scalar:\n sum(loss).\n\nSee also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.\n\nExample 1:\n\n // negative log likelihood loss, \"none\" reduction\n N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0, 2]]\n\n loss = np.zeros((N, d1))\n for n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1]\n\n // print(loss)\n // [[-3. -2.]\n // [-0. -2.]]\n\nExample 2:\n\n // weighted negative log likelihood loss, sum reduction\n N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0, 2]]\n weight = [0.2, 0.3, 0.1]\n loss = np.zeros((N, d1))\n for n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n\n loss = np.sum(loss)\n // print(loss)\n // -1.1\n\nExample 3:\n\n // weighted negative log likelihood loss, mean reduction\n N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0, 2]]\n weight = [0.2, 0.3, 0.1]\n loss = np.zeros((N, d1))\n weight_total = 0\n for n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n weight_total = weight_total + weight[c]\n\n loss = np.sum(loss) / weight_total\n // print(loss)\n // -1.57\n", + "description": "\nA NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss.\nIts \"input\" tensor has the shape of (N, C, d1, d2, ..., dk) where k >= 0.\nThe \"input\" tensor contains log-probabilities for input[n, :, d_1, d_2,..., d_k] being in a class of [0, C).\nThe operator's \"target\" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes)\nor it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... 
x dk samples.\nThe loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as:\n\n```\nloss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].\n```\n\nWhen an optional \"weight\" is provided, the sample loss is calculated as:\n\n```\nloss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].\n```\n\nloss is zero for the case when target-value equals ignore_index.\n\n```\nloss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index\n```\n\nIf \"reduction\" attribute is set to \"none\", the operator's output will be the above loss with shape (N, d1, d2, ..., dk).\nIf \"reduction\" attribute is set to \"mean\" (the default attribute value), the output loss is (weight) averaged:\n\n```\nmean(loss), if \"weight\" is not provided,\n```\n\nor if weight is provided,\n\n```\nsum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.\n```\n\nIf \"reduction\" attribute is set to \"sum\", the output is a scalar: `sum(loss)`.\n\nSee also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.\n\nExample 1:\n\n```\n// negative log likelihood loss, \"none\" reduction\nN, C, d1 = 2, 3, 2\ninput = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\ntarget = [[2, 1], [0, 2]]\n\nloss = np.zeros((N, d1))\nfor n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1]\n\n// print(loss)\n// [[-3. -2.]\n// [-0. -2.]]\n```\n\nExample 2:\n\n```\n// weighted negative log likelihood loss, sum reduction\nN, C, d1 = 2, 3, 2\ninput = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\ntarget = [[2, 1], [0, 2]]\nweight = [0.2, 0.3, 0.1]\nloss = np.zeros((N, d1))\nfor n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n\nloss = np.sum(loss)\n// print(loss)\n// -1.1\n```\n\nExample 3:\n\n```\n// weighted negative log likelihood loss, mean reduction\nN, C, d1 = 2, 3, 2\ninput = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\ntarget = [[2, 1], [0, 2]]\nweight = [0.2, 0.3, 0.1]\nloss = np.zeros((N, d1))\nweight_total = 0\nfor n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n weight_total = weight_total + weight[c]\n\nloss = np.sum(loss) / weight_total\n// print(loss)\n// -1.57\n```\n", "arguments": [ "input", "target", @@ -890,7 +890,7 @@ "expression_string": "onnx_ops.qlinearmatmul(a, a_scale, a_zero_point, b, b_scale, b_zero_point, y_scale, y_zero_point)" }, "onnx::QuantizeLinear": { - "description": "\nThe linear quantization operator. It consumes a high precision tensor, a scale, and a zero point to compute the low precision / quantized tensor.\nThe scale factor and zero point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.\nThe quantization formula is y = saturate ((x / y_scale) + y_zero_point).\nFor saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8.\nFor (x / y_scale), it's rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type.\n", + "description": "\nThe linear quantization operator. 
It consumes a high precision tensor, a scale, and a zero point to compute the low precision / quantized tensor.\nThe scale factor and zero point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.\nThe quantization formula is y = saturate ((x / y_scale) + y_zero_point).\nFor saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8.\nFor (x / y_scale), it's rounding to the nearest even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type.\n", "arguments": [ "x", "y_scale", @@ -899,7 +899,7 @@ "expression_string": "onnx_ops.quantizelinear(x, y_scale, y_zero_point, axis)" }, "onnx::RNN": { - "description": "\nComputes an one-layer simple RNN. This operator is usually supported\nvia some custom implementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`i` - input gate\n\n`t` - time step (t-1 means previous time step)\n\n`Wi` - W parameter weight matrix for input gate\n\n`Ri` - R recurrence weight matrix for input gate\n\n`Wbi` - W parameter bias vector for input gate\n\n`Rbi` - R parameter bias vector for input gate\n\n`WBi` - W parameter weight matrix for backward input gate\n\n`RBi` - R recurrence weight matrix for backward input gate\n\n`WBbi` - WR bias vectors for backward input gate\n\n`RBbi` - RR bias vectors for backward input gate\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Tanh):\n\n - Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)\nThis operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "description": "\nComputes an one-layer simple RNN. 
This operator is usually supported\nvia some custom implementation such as CuDNN.\n\nNotations:\n\n* `X` - input tensor\n* `i` - input gate\n* `t` - time step (t-1 means previous time step)\n* `Wi` - W parameter weight matrix for input gate\n* `Ri` - R recurrence weight matrix for input gate\n* `Wbi` - W parameter bias vector for input gate\n* `Rbi` - R parameter bias vector for input gate\n* `WBi` - W parameter weight matrix for backward input gate\n* `RBi` - R recurrence weight matrix for backward input gate\n* `WBbi` - WR bias vectors for backward input gate\n* `RBbi` - RR bias vectors for backward input gate\n* `H` - Hidden state\n* `num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n* Relu(x) - max(0, x)\n* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n* Sigmoid(x) - 1/(1 + e^{-x})\n\nNOTE: Below are optional\n\n* Affine(x) - alpha*x + beta\n* LeakyRelu(x) - x if x >= 0 else alpha * x\n* ThresholdedRelu(x) - x if x >= alpha else 0\n* ScaledTanh(x) - alpha*Tanh(beta*x)\n* HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n* Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n* Softsign(x) - x/(1 + |x|)\n* Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Tanh):\n\n* Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)\nThis operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", "arguments": [ "X", "W", @@ -935,7 +935,7 @@ "expression_string": "onnx_ops.randomuniformlike(input, dtype, high, low, seed)" }, "onnx::Range": { - "description": "\nGenerate a tensor containing a sequence of numbers that begin at `start` and extends by increments of `delta`\nup to `limit` (exclusive).\n\nThe number of elements in the output of range is computed as below-\n\n`number_of_elements = max( ceil( (limit - start) / delta ) , 0 )`\n\nThe pseudocode determining the contents of the output is shown below-\n\n`for(int i=0; i r is equivalent to specifying an end\nvalue of r, and specifying any start value < -r is equivalent to specifying a start\nvalue of 0.\n\nFor example:\nInput tensor with shape: [2, 3, 4]\nNo attributes specified.\nOutput: [2, 3, 4]\n\nInput tensor with shape: [2, 3, 4]\nstart: -1\nOutput: [4]\n\nInput tensor with shape: [2, 3, 4]\nend: -1\nOutput: [2, 3]\n\nInput tensor with shape: [2, 3, 4]\nstart: 1\nend: 2\nOutput: [3]\n", + "description": "\nTakes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.\nOptional attributes start and end can be used to compute a slice of the input tensor's shape.\nIf start axis is omitted, the slice starts from axis 0.\nThe end axis, if specified, is exclusive (and the returned value will not include the size of that axis).\nIf the end axis is omitted, the axes upto the last one will be included.\nNegative axes indicate counting back from the last axis.\nNote that axes will be clamped to the range [0, r-1], where r is the\nrank of the input tensor if they are out-of-range (after adding r in the case of\nnegative axis). 
Thus, specifying any end value > r is equivalent to specifying an end\nvalue of r, and specifying any start value < -r is equivalent to specifying a start\nvalue of 0.\n\nExamples:\n\n```\nInput tensor with shape: [2, 3, 4]\nNo attributes specified.\nOutput: [2, 3, 4]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nstart: -1\nOutput: [4]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nend: -1\nOutput: [2, 3]\n```\n\n```\nInput tensor with shape: [2, 3, 4]\nstart: 1\nend: 2\nOutput: [3]\n```\n", "arguments": [ "data" ], @@ -1198,7 +1198,7 @@ "expression_string": "onnx_ops.size(data)" }, "onnx::Slice": { - "description": "\nProduces a slice of the input tensor along multiple axes. Similar to numpy:\nhttps://numpy.org/doc/stable/user/basics.indexing.html?highlight=slice#slicing-and-striding\n\nSlice uses the `starts`, `ends`, `axes` and `steps` inputs to select a sub-tensor\nof its input `data` tensor.\n\nAn effective `start[i]`, `end[i]`, and `step[i]` must be computed for each `i`\nin `[0, ... r-1]` where `r = rank(input)` as follows:\n\nIf `axes` are omitted, they are set to `[0, ..., r-1]`.\nIf `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`\n\nThe effective values are initialized as `start[i] = 0`, `end[i] = dims[i]` where\n`dims` are the dimensions of `input` and `step[i] = `1.\n\nAll negative elements of `axes` are made non-negatve by adding `r` to them, where\n`r =rank(input)`.\n\nAll negative values in `starts[i]` and `ends[i]` have `dims[axes[i]]` added to them,\nwhere `dims` are the dimensions of `input`. Then `start[axes[i]]` is the adjusted\n`starts[i]` is clamped into the range `[0, dims[axes[i]]]` for positive stepping\nand `[0, dims[axes[i]]-1]` for negative stepping.\n\nThe clamping for the adjusted `ends[i]` depends on the sign of `steps[i]` and must\naccommodate copying 0 through `dims[axes[i]]` elements, so for positive stepping\n`end[axes[i]]` is clamped to `[0, dims[axes[i]]]`, while for negative stepping it\nis clamped to `[-1, dims[axes[i]]-1]`.\n\nFinally, `step[axes[i]] = steps[i]`.\n\nFor slicing to the end of a dimension with unknown size, it is recommended to pass\nin `INT_MAX` when slicing forward and 'INT_MIN' when slicing backward.\n\nExample 1:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n axes = [0, 1]\n starts = [1, 0]\n ends = [2, 3]\n steps = [1, 2]\n result = [\n [5, 7],\n ]\nExample 2:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n starts = [0, 1]\n ends = [-1, 1000]\n result = [\n [2, 3, 4],\n ]\n", + "description": "\nProduces a slice of the input tensor along multiple axes. Similar to numpy:\nhttps://numpy.org/doc/stable/user/basics.indexing.html?highlight=slice#slicing-and-striding\n\nSlice uses the `starts`, `ends`, `axes` and `steps` inputs to select a sub-tensor\nof its input `data` tensor.\n\nAn effective `start[i]`, `end[i]`, and `step[i]` must be computed for each `i`\nin `[0, ... r-1]` where `r = rank(input)` as follows:\n\nIf `axes` are omitted, they are set to `[0, ..., r-1]`.\nIf `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`\n\nThe effective values are initialized as `start[i] = 0`, `end[i] = dims[i]` where\n`dims` are the dimensions of `input` and `step[i] = `1.\n\nAll negative elements of `axes` are made non-negatve by adding `r` to them, where\n`r =rank(input)`.\n\nAll negative values in `starts[i]` and `ends[i]` have `dims[axes[i]]` added to them,\nwhere `dims` are the dimensions of `input`. 
Then `start[axes[i]]` is the adjusted\n`starts[i]` is clamped into the range `[0, dims[axes[i]]]` for positive stepping\nand `[0, dims[axes[i]]-1]` for negative stepping.\n\nThe clamping for the adjusted `ends[i]` depends on the sign of `steps[i]` and must\naccommodate copying 0 through `dims[axes[i]]` elements, so for positive stepping\n`end[axes[i]]` is clamped to `[0, dims[axes[i]]]`, while for negative stepping it\nis clamped to `[-1, dims[axes[i]]-1]`.\n\nFinally, `step[axes[i]] = steps[i]`.\n\nFor slicing to the end of a dimension with unknown size, it is recommended to pass\nin `INT_MAX` when slicing forward and 'INT_MIN' when slicing backward.\n\nExample 1:\n\n```\ndata = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]\naxes = [0, 1]\nstarts = [1, 0]\nends = [2, 3]\nsteps = [1, 2]\nresult = [\n [5, 7],\n]\n```\n\nExample 2:\n\n```\ndata = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]\nstarts = [0, 1]\nends = [-1, 1000]\nresult = [\n [2, 3, 4],\n]\n```\n", "arguments": [ "data", "starts", @@ -1216,7 +1216,7 @@ "expression_string": "onnx_ops.softmax(input, axis)" }, "onnx::SoftmaxCrossEntropyLoss": { - "description": "Loss function that measures the softmax cross entropy\nbetween 'scores' and 'labels'.\nThis operator first computes a loss tensor whose shape is identical to the labels input.\nIf the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N).\nIf the input is N-D tensor with shape (N, C, D1, D2, ..., Dk),\nthe loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L.\nAfter L is available, this operator can optionally do a reduction operator.\n\nshape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\nshape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n\nThe loss for one sample, l_i, can caculated as follows:\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.\nor\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided.\n\nloss is zero for the case when label-value equals ignore_index.\n l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index\n\nwhere:\n p = Softmax(scores)\n y = Log(p)\n c = labels[i][d1][d2]...[dk]\n\nFinally, L is optionally reduced:\nIf reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk).\nIf reduction = 'sum', the output is scalar: Sum(L).\nIf reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W),\nwhere tensor W is of shape (N, D1, D2, ..., Dk) and W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]].\n", + "description": "Loss function that measures the softmax cross entropy\nbetween 'scores' and 'labels'.\nThis operator first computes a loss tensor whose shape is identical to the labels input.\nIf the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N).\nIf the input is N-D tensor with shape (N, C, D1, D2, ..., Dk),\nthe loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L.\nAfter L is available, this operator can optionally do a reduction operator.\n\n* shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n* shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., 
Dk),\n with K >= 1 in case of K-dimensional loss.\n\nThe loss for one sample, l_i, can caculated as follows:\n```\nl[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.\n```\nor\n```\nl[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided.\n```\n\nloss is zero for the case when label-value equals ignore_index.\n```\nl[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index\n```\n\nwhere:\n```\np = Softmax(scores)\ny = Log(p)\nc = labels[i][d1][d2]...[dk]\n```\n\nFinally, L is optionally reduced:\n\n* If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk).\n* If reduction = 'sum', the output is scalar: Sum(L).\n* If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: `ReduceSum(L) / ReduceSum(W)`,\n where tensor W is of shape `(N, D1, D2, ..., Dk)` and `W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]`.\n", "arguments": [ "scores", "labels", @@ -1254,7 +1254,7 @@ "expression_string": "onnx_ops.split(input, split, axis)" }, "onnx::SplitToSequence": { - "description": "Split a tensor into a sequence of tensors, along the specified\n'axis'. Lengths of the parts can be specified using argument 'split'.\n'split' must contain only positive numbers.\n'split' is either a scalar (tensor of empty shape), or a 1-D tensor.\nIf 'split' is a scalar, then 'input' will be split into equally sized chunks(if possible).\nLast chunk will be smaller if the 'input' size along the given axis 'axis' is not divisible\nby 'split'.\nOtherwise, the tensor is split into 'size(split)' chunks, with lengths of the parts on 'axis'\nspecified in 'split'. In this scenario, the sum of entries in 'split' must be equal to the\ndimension size of input tensor on 'axis'.\n", + "description": "\nSplit a tensor into a sequence of tensors, along the specified 'axis'.\nLengths of the parts can be specified using the optional argument 'split'.\nIf the argument `split' is not specified, a default scalar value of 1\nis used as the value of `split'.\n'split' must contain only positive numbers.\n'split' is either a scalar (tensor of empty shape), or a 1-D tensor.\nIf 'split' is a scalar, then 'input' will be split into chunks all of size 'split'\nif possible. The last chunk alone may be smaller than 'split' if the 'input' size\nalong the given axis 'axis' is not divisible by 'split'.\nIf 'split' is a 1-dimensional tensor, the input tensor is split into 'size(split)' chunks,\nwith lengths of the parts on 'axis' specified in 'split'. In this scenario, the sum of entries\nin 'split' must be equal to the dimension size of input tensor on 'axis'.\n", "arguments": [ "input", "split" @@ -1335,7 +1335,7 @@ "expression_string": "onnx_ops.tile(input, repeats)" }, "onnx::TopK": { - "description": "\nRetrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of\nshape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs:\n -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n]\n which contains the values of the top k elements along the specified axis\n -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... 
a_n] which\n contains the indices of the top k elements (original indices from the input\n tensor).\n\nIf \"largest\" is 1 (the default value) then the k largest elements are returned.\nIf \"sorted\" is 1 (the default value) then the resulting k elements will be sorted.\nIf \"sorted\" is 0, order of returned 'Values' and 'Indices' are undefined.\n\nGiven two equivalent values, this operator uses the indices along the axis as\n a tiebreaker. That is, the element with the lower index will appear first.\n", + "description": "\nRetrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of\nshape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs:\n\n* Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n]\n which contains the values of the top k elements along the specified axis\n* Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which\n contains the indices of the top k elements (original indices from the input\n tensor).\n\n* If \"largest\" is 1 (the default value) then the k largest elements are returned.\n* If \"sorted\" is 1 (the default value) then the resulting k elements will be sorted.\n* If \"sorted\" is 0, order of returned 'Values' and 'Indices' are undefined.\n\nGiven two equivalent values, this operator uses the indices along the axis as\na tiebreaker. That is, the element with the lower index will appear first.\n", "arguments": [ "X", "K" @@ -1358,14 +1358,14 @@ "expression_string": "onnx_ops.trilu(input, k, upper)" }, "onnx::Unique": { - "description": "\nFind the unique elements of a tensor. When an optional attribute 'axis' is provided, unique subtensors sliced along the 'axis' are returned.\nOtherwise the input tensor is flattened and unique values of the flattened tensor are returned.\n\nThis operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs.\nThe first output tensor 'Y' contains all unique values or subtensors of the input.\nThe second optional output tensor 'indices' contains indices of 'Y' elements' first occurance in 'X'..\nThe third optional output tensor 'inverse_indices' contains, for elements of 'X', its corresponding indices in 'Y'. 
\".\nThe fourth optional output tensor 'counts' contains the count of each element of 'Y' in the input.\n\nOutputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input.\n\nhttps://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html\n\nExample 1:\n input_X = [2, 1, 1, 3, 4, 3]\n attribute_sorted = 0\n attribute_axis = None\n output_Y = [2, 1, 3, 4]\n output_indices = [0, 1, 3, 4]\n output_inverse_indices = [0, 1, 1, 2, 3, 2]\n output_counts = [1, 2, 2, 1]\n\nExample 2:\n input_X = [[1, 3], [2, 3]]\n attribute_sorted = 1\n attribute_axis = None\n output_Y = [1, 2, 3]\n output_indices = [0, 2, 1]\n output_inverse_indices = [0, 2, 1, 2]\n output_counts = [1, 1, 2]\n\nExample 3:\n input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]\n attribute_sorted = 1\n attribute_axis = 0\n output_Y = [[1, 0, 0], [2, 3, 4]]\n output_indices = [0, 2]\n output_inverse_indices = [0, 0, 1]\n output_counts = [2, 1]\n\nExample 4:\n input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],\n [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]\n attribute_sorted = 1\n attribute_axis = 1\n\n intermediate data are presented below for better understanding:\n\n there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):\n A: [[1, 1], [1, 1]],\n [[0, 1], [0, 1]],\n [[2, 1], [2, 1]],\n [[0, 1], [0, 1]].\n\n there are 3 unique subtensors:\n [[1, 1], [1, 1]],\n [[0, 1], [0, 1]],\n [[2, 1], [2, 1]].\n\n sorted unique subtensors:\n B: [[0, 1], [0, 1]],\n [[1, 1], [1, 1]],\n [[2, 1], [2, 1]].\n\n output_Y is constructed from B:\n [[[0. 1.], [1. 1.], [2. 1.]],\n [[0. 1.], [1. 1.], [2. 1.]]]\n\n output_indices is to map from B to A:\n [1, 0, 2]\n\n output_inverse_indices is to map from A to B:\n [1, 0, 2, 0]\n\n output_counts = [2 1 1]\n", + "description": "\nFind the unique elements of a tensor. When an optional attribute 'axis' is provided, unique subtensors sliced along the 'axis' are returned.\nOtherwise the input tensor is flattened and unique values of the flattened tensor are returned.\n\nThis operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs.\nThe first output tensor 'Y' contains all unique values or subtensors of the input.\nThe second optional output tensor 'indices' contains indices of 'Y' elements' first occurance in 'X'..\nThe third optional output tensor 'inverse_indices' contains, for elements of 'X', its corresponding indices in 'Y'. 
\".\nThe fourth optional output tensor 'counts' contains the count of each element of 'Y' in the input.\n\nOutputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input.\n\nhttps://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html\n\nExample 1:\n```\ninput_X = [2, 1, 1, 3, 4, 3]\nattribute_sorted = 0\nattribute_axis = None\noutput_Y = [2, 1, 3, 4]\noutput_indices = [0, 1, 3, 4]\noutput_inverse_indices = [0, 1, 1, 2, 3, 2]\noutput_counts = [1, 2, 2, 1]\n```\n\nExample 2:\n```\ninput_X = [[1, 3], [2, 3]]\nattribute_sorted = 1\nattribute_axis = None\noutput_Y = [1, 2, 3]\noutput_indices = [0, 2, 1]\noutput_inverse_indices = [0, 2, 1, 2]\noutput_counts = [1, 1, 2]\n```\n\nExample 3:\n```\ninput_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]\nattribute_sorted = 1\nattribute_axis = 0\noutput_Y = [[1, 0, 0], [2, 3, 4]]\noutput_indices = [0, 2]\noutput_inverse_indices = [0, 0, 1]\noutput_counts = [2, 1]\n```\n\nExample 4:\n```\ninput_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],\n [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]\nattribute_sorted = 1\nattribute_axis = 1\n```\n\nintermediate data are presented below for better understanding:\nthere are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):\n```\nA: [[1, 1], [1, 1]],\n [[0, 1], [0, 1]],\n [[2, 1], [2, 1]],\n [[0, 1], [0, 1]].\n```\n\nthere are 3 unique subtensors:\n```\n[[1, 1], [1, 1]],\n[[0, 1], [0, 1]],\n[[2, 1], [2, 1]].\n```\n\nsorted unique subtensors:\n```\nB: [[0, 1], [0, 1]],\n [[1, 1], [1, 1]],\n [[2, 1], [2, 1]].\n```\n\noutput_Y is constructed from B:\n```\n[[[0. 1.], [1. 1.], [2. 1.]],\n [[0. 1.], [1. 1.], [2. 1.]]]\n```\n\noutput_indices is to map from B to A:\n```\n[1, 0, 2]\n```\n\noutput_inverse_indices is to map from A to B:\n```\n[1, 0, 2, 0]\n```\n\noutput_counts:\n```\n[2, 1, 1]\n```\n", "arguments": [ "X" ], "expression_string": "onnx_ops.unique(X, axis, sorted)" }, "onnx::Unsqueeze": { - "description": "\nInsert single-dimensional entries to the shape of an input tensor (`data`).\nTakes one required input `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`).\n\nFor example:\n Given an input tensor (`data`) of shape [3, 4, 5], then\n Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].\n\nThe input `axes` should not contain any duplicate entries. It is an error if it contains duplicates.\nThe rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`.\nEach value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1].\nThe order of values in `axes` does not matter and can come in any order.\n\n", + "description": "\nInsert single-dimensional entries to the shape of an input tensor (`data`).\nTakes one required input `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`).\n\nFor example, given an input tensor (`data`) of shape [3, 4, 5], then\nUnsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].\n\nThe input `axes` should not contain any duplicate entries. 
It is an error if it contains duplicates.\nThe rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`.\nEach value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1].\nThe order of values in `axes` does not matter and can come in any order.\n", "arguments": [ "data", "axes" @@ -1404,14 +1404,14 @@ "goal", "retrieval" ], - "expression_string": "pattern_matching_function(productions,goal,retrieval)" + "expression_string": "actr.pattern_matching_function(productions,goal,retrieval)" }, "pattern_to_string": { "description": "Converts a pattern dictionary to a string format.", "arguments": [ "chunk" ], - "expression_string": "pattern_to_string(chunk)" + "expression_string": "actr.pattern_to_string(chunk)" }, "retrieve_chunk": { "description": "Retrieve a chunk from declarative memory given a pattern.", @@ -1420,7 +1420,7 @@ "dm_chunks", "types" ], - "expression_string": "retrieve_chunk(pattern,dm_chunks,types)" + "expression_string": "actr.retrieve_chunk(pattern,dm_chunks,types)" }, "sin": { "description": "Sine function", @@ -1460,20 +1460,20 @@ "production", "buffer" ], - "expression_string": "update_buffer(production,buffer)" + "expression_string": "actr.update_buffer(production,buffer)" }, "update_goal": { "description": "Returns a pattern to update the goal buffer with.", "arguments": [ "production" ], - "expression_string": "update_goal(production)" + "expression_string": "actr.update_goal(production)" }, "update_retrieval": { "description": "Returns a pattern to update the retrieval buffer with.", "arguments": [ "production" ], - "expression_string": "update_retrieval(production)" + "expression_string": "actr.update_retrieval(production)" } } diff --git a/docs/MDF_function_specifications.md b/docs/MDF_function_specifications.md index 9399cd35f..db58d62bb 100644 --- a/docs/MDF_function_specifications.md +++ b/docs/MDF_function_specifications.md @@ -215,34 +215,34 @@ Python version: `A * (A > 0)` ## change_goal

Modifies the current goal buffer using the given pattern.

-change_goal(pattern, curr_goal) = change_goal(pattern,curr_goal)
+change_goal(pattern, curr_goal) = actr.change_goal(pattern,curr_goal)

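A minimal sketch of the behavior described above, assuming chunks and patterns are plain dictionaries and that a `None` slot in the pattern means "leave unchanged" (both assumptions made for illustration; the real `actr.change_goal` may differ):

```python
# Hypothetical stand-in for actr.change_goal; dict-based chunks are an assumption.
def change_goal(pattern: dict, curr_goal: dict) -> dict:
    """Return a copy of curr_goal with slots overwritten by the pattern."""
    updated = dict(curr_goal)
    updated.update({slot: value for slot, value in pattern.items() if value is not None})
    return updated

goal = {"isa": "count", "state": "start", "number": "one"}
print(change_goal({"state": "counting"}, goal))
# {'isa': 'count', 'state': 'counting', 'number': 'one'}
```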
-Python version: `change_goal(pattern,curr_goal)`
+Python version: `actr.change_goal(pattern,curr_goal)`

## check_termination

Function used to check if no production was selected.

-check_termination(production) = check_termination(production)
+check_termination(production) = actr.check_termination(production)

-Python version: `check_termination(production)`
+Python version: `actr.check_termination(production)`

## chunk_to_string

Converts a chunk dictionary to a string format.

-chunk_to_string(chunk) = chunk_to_string(chunk)
+chunk_to_string(chunk) = actr.chunk_to_string(chunk)

-Python version: `chunk_to_string(chunk)`
+Python version: `actr.chunk_to_string(chunk)`

## conflict_resolution_function

ACT-R conflict resolution function. Currently selects a production at random from the already matched productions, since utility values and learning are not implemented yet.

-conflict_resolution_function(productions) = conflict_resolution_function(productions)
+conflict_resolution_function(productions) = actr.conflict_resolution_function(productions)

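Given that the description says selection is currently uniform at random over the matched productions, a faithful sketch (assuming productions arrive as a list) is essentially one line:

```python
import random

# Hypothetical stand-in for actr.conflict_resolution_function.
def conflict_resolution_function(productions: list):
    """Pick one matched production at random; None when nothing matched."""
    return random.choice(productions) if productions else None

print(conflict_resolution_function(["count-up", "stop-counting"]))
```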
-Python version: `conflict_resolution_function(productions)`
+Python version: `actr.conflict_resolution_function(productions)`

@@ -265,9 +265,9 @@ Python version: `scale * numpy.cosh(variable0)`

## drift_diffusion_integrator

Integrates the drift diffusion model for a single trial using an implementation of the Euler-Maruyama method. This is a proof-of-concept implementation and is not optimized for speed.

-drift_diffusion_integrator(starting_point, non_decision_time, drift_rate, threshold, noise, dt) = drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt)
+drift_diffusion_integrator(starting_point, non_decision_time, drift_rate, threshold, noise, dt) = ddm.drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt)

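A hedged re-implementation of that Euler-Maruyama loop, using the parameter names from the signature above (the packaged `ddm` module may treat edge cases and return values differently):

```python
import numpy as np

def drift_diffusion_integrator(starting_point, non_decision_time, drift_rate,
                               threshold, noise, dt, rng=None):
    """Integrate one DDM trial; return (signed decision, reaction time)."""
    rng = rng or np.random.default_rng()
    x, t = starting_point, non_decision_time
    while abs(x) < threshold:
        # Euler-Maruyama step: deterministic drift plus sqrt(dt)-scaled Gaussian noise.
        x += drift_rate * dt + noise * np.sqrt(dt) * rng.standard_normal()
        t += dt
    return np.sign(x) * threshold, t

print(drift_diffusion_integrator(0.0, 0.3, 1.0, 1.0, 1.0, 1e-3))
```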
-Python version: `drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt)`
+Python version: `ddm.drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt)`

@@ -297,9 +297,9 @@ Python version: `1/(1 + numpy.exp(-1*gain*(variable0 + bias) + offset))`

## match_production

Returns True if the production's left hand side matches the given context and adds the matching bindings to the production.

-match_production(production, context) = match_production(production,context)
+match_production(production, context) = actr.match_production(production,context)

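A rough sketch of the matching-with-bindings idea; the dict layout (an `"lhs"` mapping with `=variable` slot tests) is an assumption made for illustration, not the library's actual production format:

```python
def match_production(production: dict, context: dict) -> bool:
    """Match LHS slot tests against the context, recording =variable bindings."""
    bindings = {}
    for slot, test in production["lhs"].items():
        value = context.get(slot)
        if isinstance(test, str) and test.startswith("="):  # variable slot test
            if test in bindings and bindings[test] != value:
                return False
            bindings[test] = value
        elif test != value:  # constant slot test must match exactly
            return False
    production["bindings"] = bindings
    return True

prod = {"lhs": {"isa": "count", "number": "=num"}}
print(match_production(prod, {"isa": "count", "number": "two"}), prod["bindings"])
# True {'=num': 'two'}
```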
-Python version: `match_production(production,context)`
+Python version: `actr.match_production(production,context)`

@@ -440,11 +440,11 @@ Python version: `onnx_ops.anumpy.tanh(input)`
subset of the input tensor according to the kernel size and downsampling the
data into the output tensor Y for further processing. The output spatial shape will be following:
```
- output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
+ output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)
```
or
```
- output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)
+ output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)
```
if ceil_mode is enabled

@@ -454,12 +454,12 @@ Python version: `onnx_ops.anumpy.tanh(input)`
`auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:
```
- VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])
+ VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])
SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])
```
And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:
```
- pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]
+ pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]
```
The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).

@@ -480,8 +480,8 @@ statistics in inference mode (training_mode=False, default), and the running statistics in training mode (training_mode=True). There are multiple cases for the number of outputs, which we list below: -Output case #1: Y, running_mean, running_var (training_mode=True) -Output case #2: Y (training_mode=False) +* Output case #1: Y, running_mean, running_var (training_mode=True) +* Output case #2: Y (training_mode=False) When training_mode=False, extra outputs are invalid. The outputs are updated as follows when training_mode=True: @@ -490,17 +490,15 @@ running_mean = input_mean * momentum + current_mean * (1 - momentum) running_var = input_var * momentum + current_var * (1 - momentum) Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B - +``` where: - +``` current_mean = ReduceMean(X, axis=all_except_channel_index) current_var = ReduceVar(X, axis=all_except_channel_index) - -Notice that ReduceVar refers to the population variance, and it equals to -sum(sqrd(x_i - x_avg)) / N -where N is the population size (this formula does not use sample size N - 1). - ``` +Notice that `ReduceVar` refers to the population variance, and it equals to +`sum(sqrd(x_i - x_avg)) / N` +where `N` is the population size (this formula does not use sample size `N - 1`). The computation of ReduceMean and ReduceVar uses float to avoid overflow for float16 inputs. @@ -537,16 +535,16 @@ Python version: `onnx_ops.bernoulli(input, dtype, seed)` ## BitShift

Bitwise shift operator performs element-wise operation. For each input element, if the
- attribute "direction" is "RIGHT", this operator moves its binary representation toward
- the right side so that the input value is effectively decreased. If the attribute "direction"
- is "LEFT", bits of binary representation moves toward the left side, which results the
- increase of its actual value. The input X is the tensor to be shifted and another input
- Y specifies the amounts of shifting. For example, if "direction" is "Right", X is [1, 4],
- and S is [1, 1], the corresponding output Z would be [0, 2]. If "direction" is "LEFT" with
- X=[1, 2] and S=[1, 2], the corresponding output Y would be [2, 8].
-
- Because this operator supports Numpy-style broadcasting, X's and Y's shapes are
- not necessarily identical.
+attribute "direction" is "RIGHT", this operator moves its binary representation toward
+the right side so that the input value is effectively decreased. If the attribute "direction"
+is "LEFT", bits of binary representation moves toward the left side, which results the
+increase of its actual value. The input X is the tensor to be shifted and another input
+Y specifies the amounts of shifting. For example, if "direction" is "Right", X is [1, 4],
+and S is [1, 1], the corresponding output Z would be [0, 2]. If "direction" is "LEFT" with
+X=[1, 2] and S=[1, 2], the corresponding output Y would be [2, 8].
+
+Because this operator supports Numpy-style broadcasting, X's and Y's shapes are
+not necessarily identical.
This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md).

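The two worked examples above can be checked directly with NumPy's shift functions, which follow the same semantics (this is just a sanity check, not the `onnx_ops.bitshift` implementation):

```python
import numpy as np

X, S = np.array([1, 4]), np.array([1, 1])
print(np.right_shift(X, S))  # [0 2]  "RIGHT": values effectively decrease

X, S = np.array([1, 2]), np.array([1, 2])
print(np.left_shift(X, S))   # [2 8]  "LEFT": values effectively increase
```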
Python version: `onnx_ops.bitshift(X, Y, direction)` @@ -563,7 +561,7 @@ in the 'DataType' enum field in the TensorProto message. Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations (e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may -result 100. There are some string literals reserved for special floating-point values; +yield result 100. There are some string literals reserved for special floating-point values; "+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively. Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinite. Similarly, this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors @@ -585,7 +583,7 @@ In more detail, the conversion among numerical types should follow these rules: * Casting from fixed point to: * floating point: +/- infinity if OOR. (+ infinity in the case of uint) * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for -signed types). For example, 200 (int16) -> -56 (int8). + signed types). For example, 200 (int16) -> -56 (int8). * bool: zero to False; nonzero to True. * Casting from bool to: * floating point: `{1.0, 0.0}`. @@ -614,7 +612,7 @@ Python version: `onnx_ops.castlike(input, target_type)`

Ceil takes one input data (Tensor) and produces one output
data (Tensor) where the ceil is, y = ceil(x), is applied to
-the tensor elementwise.
+the tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is returned.

Python version: `onnx_ops.ceil(X)` @@ -803,26 +801,22 @@ and width dimensions. By default, `mode` = `DCR`. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. The output y is computed from the input x as below: +``` b, c, h, w = x.shape - tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w]) - tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2]) - y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize]) - +``` In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the following order: column, row, and the depth. The output y is computed from the input x as below: +``` b, c, h, w = x.shape - tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w]) - tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3]) - y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize]) - +```

Python version: `onnx_ops.depthtospace(input, blocksize, mode)`

@@ -833,9 +827,9 @@ Python version: `onnx_ops.depthtospace(input, blocksize, mode)`

## DequantizeLinear

The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor.
-The dequantization formula is y = (x - x_zero_point) * x_scale. 'x_scale' and 'x_zero_point' must have same shape, and can be either a scalar
+The dequantization formula is `y = (x - x_zero_point) * x_scale`. `x_scale` and `x_zero_point` must have same shape, and can be either a scalar
for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.
-'x_zero_point' and 'x' must have same type. 'x' and 'y' must have same shape. In the case of dequantizing int32,
+`x_zero_point` and `x` must have same type. `x` and `y` must have same shape. In the case of dequantizing int32,
there's no zero point (zero point is supposed to be 0).

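A quick NumPy check of the per-tensor dequantization formula `y = (x - x_zero_point) * x_scale`, with illustrative values:

```python
import numpy as np

x = np.array([0, 128, 255], dtype=np.uint8)
x_scale, x_zero_point = 0.05, 128

# Widen before subtracting so the uint8 arithmetic cannot wrap around.
y = (x.astype(np.int32) - x_zero_point) * x_scale
print(y)  # [-6.4   0.    6.35]
```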
@@ -899,24 +893,29 @@ A Function to fuse calculation for Scale, Zero Point and FP32->8Bit convertion o Outputs Scale, ZeroPoint and Quantized Input for a given FP32 Input. Scale is calculated as: ``` - y_scale = (max(x) - min(x))/(qmax - qmin) - * where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8 - * data range is adjusted to include 0. +y_scale = (max(x) - min(x))/(qmax - qmin) ``` + +* where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8 +* data range is adjusted to include 0. + Zero point is calculated as: ``` intermediate_zero_point = qmin - min(x)/y_scale y_zero_point = cast(round(saturate(itermediate_zero_point))) +``` + * where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8 * for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported. * rounding to nearest ties to even. -``` + Data quantization formula is: ``` y = saturate (round (x / y_scale) + y_zero_point) +``` + * for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported. * rounding to nearest ties to even. -```

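Putting the three formulas above together for the uint8 case gives roughly the following sketch (saturation and round-half-to-even are expressed with `np.clip` and `np.rint`):

```python
import numpy as np

def dynamic_quantize_linear(x):
    """Sketch of uint8 dynamic quantization following the formulas above."""
    qmin, qmax = 0, 255
    # The data range is adjusted to include 0.
    rmin, rmax = min(x.min(), 0.0), max(x.max(), 0.0)
    y_scale = (rmax - rmin) / (qmax - qmin)
    y_zero_point = int(np.clip(np.rint(qmin - rmin / y_scale), qmin, qmax))
    y = np.clip(np.rint(x / y_scale) + y_zero_point, qmin, qmax).astype(np.uint8)
    return y, y_scale, y_zero_point

print(dynamic_quantize_linear(np.array([-1.0, 0.0, 2.0])))
# (array([  0,  85, 255], dtype=uint8), 0.011764705882352941, 85)
```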
Python version: `onnx_ops.dynamicquantizelinear(x)`

@@ -926,9 +925,11 @@

## Einsum

-An einsum of the form ```term1, term2 -> output-term``` produces an output tensor using the following equation +An einsum of the form `term1, term2 -> output-term` produces an output tensor using the following equation -```output[output-term] = reduce-sum( input1[term1] * input2[term] )``` +``` +output[output-term] = reduce-sum( input1[term1] * input2[term] ) +``` where the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2) that do not occur in the output-term. @@ -1051,7 +1052,7 @@ Python version: `onnx_ops.flatten(input, axis)`

Floor takes one input data (Tensor) and produces one output
data (Tensor) where the floor is, y = floor(x), is applied to
-the tensor elementwise.
+the tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is returned.

Python version: `onnx_ops.floor(X)` @@ -1066,73 +1067,47 @@ implementation such as CuDNN. Notations: -`X` - input tensor - -`z` - update gate - -`r` - reset gate - -`h` - hidden gate - -`t` - time step (t-1 means previous time step) - -`W[zrh]` - W parameter weight matrix for update, reset, and hidden gates - -`R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates - -`Wb[zrh]` - W bias vectors for update, reset, and hidden gates - -`Rb[zrh]` - R bias vectors for update, reset, and hidden gates - -`WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates - -`RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates - -`WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates - -`RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates - -`H` - Hidden state - -`num_directions` - 2 if direction == bidirectional else 1 +* `X` - input tensor +* `z` - update gate +* `r` - reset gate +* `h` - hidden gate +* `t` - time step (t-1 means previous time step) +* `W[zrh]` - W parameter weight matrix for update, reset, and hidden gates +* `R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates +* `Wb[zrh]` - W bias vectors for update, reset, and hidden gates +* `Rb[zrh]` - R bias vectors for update, reset, and hidden gates +* `WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates +* `RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates +* `WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates +* `RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates +* `H` - Hidden state +* `num_directions` - 2 if direction == bidirectional else 1 Activation functions: - Relu(x) - max(0, x) - - Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - - Sigmoid(x) - 1/(1 + e^{-x}) - - (NOTE: Below are optional) +* Relu(x) - max(0, x) +* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) +* Sigmoid(x) - 1/(1 + e^{-x}) - Affine(x) - alpha*x + beta +NOTE: + Below are optional - LeakyRelu(x) - x if x >= 0 else alpha * x - - ThresholdedRelu(x) - x if x >= alpha else 0 - - ScaledTanh(x) - alpha*Tanh(beta*x) - - HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) - - Elu(x) - x if x >= 0 else alpha*(e^x - 1) - - Softsign(x) - x/(1 + |x|) - - Softplus(x) - log(1 + e^x) +* Affine(x) - alpha * x + beta +* LeakyRelu(x) - x if x >= 0 else alpha * x +* ThresholdedRelu(x) - x if x >= alpha else 0 +* ScaledTanh(x) - alpha * Tanh(beta * x) +* HardSigmoid(x) - min(max(alpha * x + beta, 0), 1) +* Elu(x) - x if x >= 0 else alpha * (e^x - 1) +* Softsign(x) - x/(1 + |x|) +* Softplus(x) - log(1 + e^x) Equations (Default: f=Sigmoid, g=Tanh): - - zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) - - - rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) - - - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0 - - - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0 - - - Ht = (1 - zt) (.) ht + zt (.) Ht-1 +* zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) +* rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) +* ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0 +* ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0 +* Ht = (1 - zt) (.) ht + zt (.) Ht-1 This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. 
An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

@@ -1147,56 +1122,49 @@ Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather entries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates them in an output tensor of rank q + (r - 1). -axis = 0 : - -Let -k = indices[i_{0}, ..., i_{q-1}] -Then -output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}] +If `axis = 0`, let `k = indices[i_{0}, ..., i_{q-1}]` +then `output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]`: ``` - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - indices = [ - [0, 1], - [1, 2], - ] - output = [ - [ - [1.0, 1.2], - [2.3, 3.4], - ], - [ - [2.3, 3.4], - [4.5, 5.7], - ], - ] +data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], +] +indices = [ + [0, 1], + [1, 2], +] +output = [ + [ + [1.0, 1.2], + [2.3, 3.4], + ], + [ + [2.3, 3.4], + [4.5, 5.7], + ], +] ``` -axis = 1 : -Let -k = indices[i_{0}, ..., i_{q-1}] -Then -output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}] +If `axis = 1`, let `k = indices[i_{0}, ..., i_{q-1}]` +then `output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]`: ``` - data = [ - [1.0, 1.2, 1.9], - [2.3, 3.4, 3.9], - [4.5, 5.7, 5.9], - ] - indices = [ - [0, 2], - ] - axis = 1, - output = [ - [[1.0, 1.9]], - [[2.3, 3.9]], - [[4.5, 5.9]], - ] +data = [ + [1.0, 1.2, 1.9], + [2.3, 3.4, 3.9], + [4.5, 5.7, 5.9], +] +indices = [ + [0, 2], +] +axis = 1, +output = [ + [[1.0, 1.9]], + [[2.3, 3.9]], + [[4.5, 5.9]], +] ```

@@ -1219,45 +1187,45 @@ Its output shape is the same as the shape of `indices` and consists of one value For instance, in the 3-D case (r = 3), the output produced is determined by the following equations: ``` - out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0, - out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1, - out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2, +out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0, +out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1, +out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2, ``` This operator is also the inverse of ScatterElements. It is similar to Torch's gather operation. Example 1: ``` - data = [ - [1, 2], - [3, 4], - ] - indices = [ - [0, 0], - [1, 0], - ] - axis = 1 - output = [ - [1, 1], - [4, 3], - ] +data = [ + [1, 2], + [3, 4], +] +indices = [ + [0, 0], + [1, 0], +] +axis = 1 +output = [ + [1, 1], + [4, 3], +] ``` Example 2: ``` - data = [ - [1, 2, 3], - [4, 5, 6], - [7, 8, 9], - ] - indices = [ - [1, 2, 0], - [2, 0, 0], - ] - axis = 0 - output = [ - [4, 8, 3], - [7, 2, 3], - ] +data = [ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], +] +indices = [ + [1, 2, 0], + [2, 0, 0], +] +axis = 0 +output = [ + [4, 8, 3], + [7, 2, 3], +] ```

@@ -1370,9 +1338,8 @@ Python version: `onnx_ops.gathernd(data, indices, batch_dims)`

General Matrix multiplication: https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3

-A' = transpose(A) if transA else A
-
-B' = transpose(B) if transB else B
+* A' = transpose(A) if transA else A
+* B' = transpose(B) if transB else B

Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),
input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),

@@ -1529,14 +1496,14 @@ Python version: `onnx_ops.isnan(X)`

Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). It normalizes over local input regions.
-The local region is defined across the channels. For an element X[n, c, d1, ..., dk] in a tensor
-of shape (N x C x D1 x D2, ..., Dk), its region is
-{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}.
+The local region is defined across the channels. For an element `X[n, c, d1, ..., dk]` in a tensor
+of shape `(N x C x D1 x D2, ..., Dk)`, its region is
+`{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}`.

-square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2),
-where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)).
+`square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2)`,
+where `max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))`.

-Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta
+`Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta`

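For intuition, a direct (and deliberately slow) NumPy translation of the formulas above for an `(N, C, H, W)` tensor could look like this:

```python
import numpy as np

def lrn(X, alpha=1e-4, beta=0.75, bias=1.0, size=5):
    """Naive LRN: normalize each channel by a sum of squares over nearby channels."""
    C = X.shape[1]
    Y = np.empty_like(X)
    for c in range(C):
        lo = max(0, c - (size - 1) // 2)
        hi = min(C - 1, c + -(-(size - 1) // 2))  # ceil((size - 1) / 2)
        square_sum = (X[:, lo:hi + 1] ** 2).sum(axis=1)
        Y[:, c] = X[:, c] / (bias + alpha / size * square_sum) ** beta
    return Y

X = np.random.default_rng(0).standard_normal((1, 8, 4, 4))
print(lrn(X).shape)  # (1, 8, 4, 4)
```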
Python version: `onnx_ops.lrn(X, alpha, beta, bias, size)` @@ -1551,81 +1518,50 @@ custom implementation such as CuDNN. Notations: -`X` - input tensor - -`i` - input gate - -`o` - output gate - -`f` - forget gate - -`c` - cell gate - -`t` - time step (t-1 means previous time step) - -`W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates - -`R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates - -`Wb[iofc]` - W bias vectors for input, output, forget, and cell gates - -`Rb[iofc]` - R bias vectors for input, output, forget, and cell gates - -`P[iof]` - P peephole weight vector for input, output, and forget gates - -`WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates - -`RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates - -`WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates - -`RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates - -`PB[iof]` - P peephole weight vector for backward input, output, and forget gates - -`H` - Hidden state - -`num_directions` - 2 if direction == bidirectional else 1 +* `X` - input tensor +* `i` - input gate +* `o` - output gate +* `f` - forget gate +* `c` - cell gate +* `t` - time step (t-1 means previous time step) +* `W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates +* `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates +* `Wb[iofc]` - W bias vectors for input, output, forget, and cell gates +* `Rb[iofc]` - R bias vectors for input, output, forget, and cell gates +* `P[iof]` - P peephole weight vector for input, output, and forget gates +* `WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates +* `RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates +* `WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates +* `RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates +* `PB[iof]` - P peephole weight vector for backward input, output, and forget gates +* `H` - Hidden state +* `num_directions` - 2 if direction == bidirectional else 1 Activation functions: - Relu(x) - max(0, x) - - Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - - Sigmoid(x) - 1/(1 + e^{-x}) - - (NOTE: Below are optional) +* Relu(x) - max(0, x) +* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) +* Sigmoid(x) - 1/(1 + e^{-x}) - Affine(x) - alpha*x + beta +NOTE: Below are optional - LeakyRelu(x) - x if x >= 0 else alpha * x - - ThresholdedRelu(x) - x if x >= alpha else 0 - - ScaledTanh(x) - alpha*Tanh(beta*x) - - HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) - - Elu(x) - x if x >= 0 else alpha*(e^x - 1) - - Softsign(x) - x/(1 + |x|) - - Softplus(x) - log(1 + e^x) +* Affine(x) - alpha*x + beta +* LeakyRelu(x) - x if x >= 0 else alpha * x +* ThresholdedRelu(x) - x if x >= alpha else 0 +* ScaledTanh(x) - alpha*Tanh(beta*x) +* HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) +* Elu(x) - x if x >= 0 else alpha*(e^x - 1) +* Softsign(x) - x/(1 + |x|) +* Softplus(x) - log(1 + e^x) Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): - - it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) - - - ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) - - - ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) - - - Ct = ft (.) Ct-1 + it (.) ct - - - ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) - - - Ht = ot (.) h(Ct) +* it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) 
Ct-1 + Wbi + Rbi) +* ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) +* ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) +* Ct = ft (.) Ct-1 + it (.) ct +* ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) +* Ht = ot (.) h(Ct) This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

@@ -1768,11 +1704,7 @@ Python version: `onnx_ops.max(data_0)` ``` output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) ``` - if ceil_mode is enabled - - ``` - * pad_shape[i] is sum of pads along axis i - ``` + if ceil_mode is enabled `pad_shape[i]` is the sum of pads along axis `i`. `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following: ``` @@ -1844,7 +1776,7 @@ Python version: `onnx_ops.mean(data_0)` ## MeanVarianceNormalization

A MeanVarianceNormalization Function: Perform mean variance normalization
- on the input tensor X using formula: ``` (X-EX)/sqrt(E(X-EX)^2) ```
+ on the input tensor X using formula: `(X-EX)/sqrt(E(X-EX)^2)`

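Since the MVN formula is plain standardization, a NumPy equivalent over all axes is a two-liner (the real `onnx_ops.meanvariancenormalization` also accepts an `axes` argument to normalize over a subset of axes):

```python
import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0]])
Y = (X - X.mean()) / np.sqrt(((X - X.mean()) ** 2).mean())
print(Y.mean(), Y.std())  # ~0.0 and ~1.0
```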
Python version: `onnx_ops.meanvariancenormalization(X, axes)`

@@ -1867,16 +1799,16 @@ Python version: `onnx_ops.min(data_0)`

## Mod

Performs element-wise binary modulus (with Numpy-style broadcasting support).
- The sign of the remainder is the same as that of the Divisor.
+ The sign of the remainder is the same as that of the Divisor.

- Mod operator can also behave like C fmod() or numpy.fmod. In this case, the sign of the remainder however, will be the same as the Dividend
- (in contrast to integer mod). To force a behavior like numpy.fmod() an 'fmod' Attribute is provided.
- This attribute is set to 0 by default causing the behavior to be like integer mod.
- Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod().
+ Mod operator can also behave like C fmod() or numpy.fmod. In this case, the sign of the remainder however, will be the same as the Dividend
+ (in contrast to integer mod). To force a behavior like numpy.fmod() an 'fmod' Attribute is provided.
+ This attribute is set to 0 by default causing the behavior to be like integer mod.
+ Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod().

- If the input type is floating point, then `fmod` attribute must be set to 1.
+ If the input type is floating point, then `fmod` attribute must be set to 1.

- In case of dividend being zero, the results will be platform dependent.
+ In case of dividend being zero, the results will be platform dependent.

This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md).

@@ -1932,85 +1864,100 @@ The operator's "target" input tensor has the shape of (N, d1, d2, ..., dk). It e or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x dk samples. The loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as: - loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k]. +``` +loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k]. +``` When an optional "weight" is provided, the sample loss is calculated as: - loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c]. +``` +loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c]. +``` loss is zero for the case when target-value equals ignore_index. - loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index +``` +loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index +``` If "reduction" attribute is set to "none", the operator's output will be the above loss with shape (N, d1, d2, ..., dk). If "reduction" attribute is set to "mean" (the default attribute value), the output loss is (weight) averaged: - mean(loss), if "weight" is not provided, +``` +mean(loss), if "weight" is not provided, +``` or if weight is provided, - sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples. +``` +sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples. +``` -If "reduction" attribute is set to "sum", the output is a scalar: - sum(loss). +If "reduction" attribute is set to "sum", the output is a scalar: `sum(loss)`. See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss. Example 1: - // negative log likelihood loss, "none" reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - - loss = np.zeros((N, d1)) - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] - - // print(loss) - // [[-3. -2.] - // [-0. -2.]] +``` +// negative log likelihood loss, "none" reduction +N, C, d1 = 2, 3, 2 +input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] +target = [[2, 1], [0, 2]] + +loss = np.zeros((N, d1)) +for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] + +// print(loss) +// [[-3. -2.] +// [-0. 
-2.]] +``` Example 2: - // weighted negative log likelihood loss, sum reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - weight = [0.2, 0.3, 0.1] - loss = np.zeros((N, d1)) - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] * weight[c] - - loss = np.sum(loss) - // print(loss) - // -1.1 +``` +// weighted negative log likelihood loss, sum reduction +N, C, d1 = 2, 3, 2 +input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] +target = [[2, 1], [0, 2]] +weight = [0.2, 0.3, 0.1] +loss = np.zeros((N, d1)) +for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] * weight[c] + +loss = np.sum(loss) +// print(loss) +// -1.1 +``` Example 3: - // weighted negative log likelihood loss, mean reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - weight = [0.2, 0.3, 0.1] - loss = np.zeros((N, d1)) - weight_total = 0 - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] * weight[c] - weight_total = weight_total + weight[c] - - loss = np.sum(loss) / weight_total - // print(loss) - // -1.57 +``` +// weighted negative log likelihood loss, mean reduction +N, C, d1 = 2, 3, 2 +input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] +target = [[2, 1], [0, 2]] +weight = [0.2, 0.3, 0.1] +loss = np.zeros((N, d1)) +weight_total = 0 +for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] * weight[c] + weight_total = weight_total + weight[c] + +loss = np.sum(loss) / weight_total +// print(loss) +// -1.57 +```

Python version: `onnx_ops.negativeloglikelihoodloss(input, target, weight, ignore_index, reduction)` @@ -2281,7 +2228,7 @@ The linear quantization operator. It consumes a high precision tensor, a scale, The scale factor and zero point must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8. -For (x / y_scale), it's rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type. +For (x / y_scale), it rounds to the nearest integer, with ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type.
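For illustration, a minimal NumPy sketch of the uint8 case of this formula (not the `onnx_ops` implementation):

```python
import numpy as np

def quantize_linear_uint8(x, y_scale, y_zero_point):
    # np.rint rounds half to even, matching the rounding rule above
    y = np.rint(x / y_scale) + y_zero_point
    return np.clip(y, 0, 255).astype(np.uint8)  # saturate to [0, 255]

print(quantize_linear_uint8(np.array([0.0, 1.0, 2.55]), 0.01, 0))  # [  0 100 255]
```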

Python version: `onnx_ops.quantizelinear(x, y_scale, y_zero_point, axis)` @@ -2296,61 +2243,40 @@ via some custom implementation such as CuDNN. Notations: -`X` - input tensor - -`i` - input gate - -`t` - time step (t-1 means previous time step) - -`Wi` - W parameter weight matrix for input gate - -`Ri` - R recurrence weight matrix for input gate - -`Wbi` - W parameter bias vector for input gate - -`Rbi` - R parameter bias vector for input gate - -`WBi` - W parameter weight matrix for backward input gate - -`RBi` - R recurrence weight matrix for backward input gate - -`WBbi` - WR bias vectors for backward input gate - -`RBbi` - RR bias vectors for backward input gate - -`H` - Hidden state - -`num_directions` - 2 if direction == bidirectional else 1 +* `X` - input tensor +* `i` - input gate +* `t` - time step (t-1 means previous time step) +* `Wi` - W parameter weight matrix for input gate +* `Ri` - R recurrence weight matrix for input gate +* `Wbi` - W parameter bias vector for input gate +* `Rbi` - R parameter bias vector for input gate +* `WBi` - W parameter weight matrix for backward input gate +* `RBi` - R recurrence weight matrix for backward input gate +* `WBbi` - WR bias vectors for backward input gate +* `RBbi` - RR bias vectors for backward input gate +* `H` - Hidden state +* `num_directions` - 2 if direction == bidirectional else 1 Activation functions: - Relu(x) - max(0, x) - - Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - - Sigmoid(x) - 1/(1 + e^{-x}) - - (NOTE: Below are optional) - - Affine(x) - alpha*x + beta - - LeakyRelu(x) - x if x >= 0 else alpha * x - - ThresholdedRelu(x) - x if x >= alpha else 0 +* Relu(x) - max(0, x) +* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) +* Sigmoid(x) - 1/(1 + e^{-x}) - ScaledTanh(x) - alpha*Tanh(beta*x) +NOTE: Below are optional - HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) - - Elu(x) - x if x >= 0 else alpha*(e^x - 1) - - Softsign(x) - x/(1 + |x|) - - Softplus(x) - log(1 + e^x) +* Affine(x) - alpha*x + beta +* LeakyRelu(x) - x if x >= 0 else alpha * x +* ThresholdedRelu(x) - x if x >= alpha else 0 +* ScaledTanh(x) - alpha*Tanh(beta*x) +* HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) +* Elu(x) - x if x >= 0 else alpha*(e^x - 1) +* Softsign(x) - x/(1 + |x|) +* Softplus(x) - log(1 + e^x) Equations (Default: f=Tanh): - - Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) +* Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.
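For illustration, the default equation (f=Tanh) for one forward time step as a NumPy sketch (not the `onnx_ops` implementation):

```python
import numpy as np

def rnn_step(Xt, H_prev, Wi, Ri, Wbi, Rbi):
    # Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi), with f = Tanh by default
    return np.tanh(Xt @ Wi.T + H_prev @ Ri.T + Wbi + Rbi)
```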

@@ -2427,28 +2353,33 @@ Python version: `onnx_ops.randomuniformlike(input, dtype, high, low, seed)` Generate a tensor containing a sequence of numbers that begins at `start` and extends by increments of `delta` up to `limit` (exclusive). -The number of elements in the output of range is computed as below- - -`number_of_elements = max( ceil( (limit - start) / delta ) , 0 )` +The number of elements in the output of range is computed as below: + +``` +number_of_elements = max( ceil( (limit - start) / delta ) , 0 ) +``` -The pseudocode determining the contents of the output is shown below- - -`for(int i=0; i<number_of_elements; ++i) { output[i] = start + (i * delta); }` +The pseudocode determining the contents of the output is shown below: + +``` +for(int i=0; i<number_of_elements; ++i) { +  output[i] = start + (i * delta); +} +```
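The semantics match NumPy's `arange`, e.g.:

```python
import numpy as np

print(np.arange(3, 9, 3))    # [3 6]
print(np.arange(10, 4, -2))  # [10  8  6]
```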

Python version: `onnx_ops.range(start, limit, delta)` @@ -2470,9 +2401,10 @@ Python version: `onnx_ops.reciprocal(X)` ## ReduceL1

-Computes the L1 norm of the input tensor's element along the provided axes. The resulting +Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.
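The `keepdims` behavior shared by this and the following Reduce* operators can be illustrated in NumPy (a sketch, not the `onnx_ops` implementation):

```python
import numpy as np

data = np.array([[1.0, -2.0], [3.0, -4.0]])
# L1 norm along axis 1
print(np.sum(np.abs(data), axis=1, keepdims=True))   # shape (2, 1): [[3.] [7.]]
print(np.sum(np.abs(data), axis=1, keepdims=False))  # shape (2,):   [3. 7.]
```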

@@ -2484,9 +2416,10 @@ Python version: `onnx_ops.reducel1(data, axes, keepdims)` ## ReduceL2

-Computes the L2 norm of the input tensor's element along the provided axes. The resulting +Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2498,9 +2431,10 @@ Python version: `onnx_ops.reducel2(data, axes, keepdims)` ## ReduceLogSum

-Computes the log sum of the input tensor's element along the provided axes. The resulting +Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2512,9 +2446,10 @@ Python version: `onnx_ops.reducelogsum(data, axes, keepdims)` ## ReduceLogSumExp

-Computes the log sum exponent of the input tensor's element along the provided axes. The resulting +Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2526,9 +2461,10 @@ Python version: `onnx_ops.reducelogsumexp(data, axes, keepdims)` ## ReduceMax

-Computes the max of the input tensor's element along the provided axes. The resulting +Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2540,9 +2476,10 @@ Python version: `onnx_ops.reducemax(data, axes, keepdims)` ## ReduceMean

-Computes the mean of the input tensor's element along the provided axes. The resulting +Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2554,9 +2491,10 @@ Python version: `onnx_ops.reducemean(data, axes, keepdims)` ## ReduceMin

-Computes the min of the input tensor's element along the provided axes. The resulting +Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2568,9 +2506,10 @@ Python version: `onnx_ops.reducemin(data, axes, keepdims)` ## ReduceProd

-Computes the product of the input tensor's element along the provided axes. The resulting +Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2582,9 +2521,10 @@ Python version: `onnx_ops.reduceprod(data, axes, keepdims)` ## ReduceSum

-Computes the sum of the input tensor's element along the provided axes. The resulting +Computes the sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2596,9 +2536,10 @@ Python version: `onnx_ops.reducesum(data, axes, keepdims, noop_with_empty_axes)` ## ReduceSumSquare

-Computes the sum square of the input tensor's element along the provided axes. The resulting +Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2721,6 +2662,7 @@ Python version: `onnx_ops.roialign(X, rois, batch_indices, mode, output_height, Round takes one input Tensor and rounds the values, element-wise, meaning it finds the nearest integer for each value. In case of halves, the rule is to round them to the nearest even integer. +If input x is integral, +0, -0, NaN, or infinite, x itself is returned. The output tensor has the same shape and type as the input. Examples: @@ -3023,23 +2965,32 @@ negative axis). Thus, specifying any end value > r is equivalent to specifying a value of r, and specifying any start value < -r is equivalent to specifying a start value of 0. -For example: +Examples: + +``` Input tensor with shape: [2, 3, 4] No attributes specified. Output: [2, 3, 4] +``` +``` Input tensor with shape: [2, 3, 4] start: -1 Output: [4] +``` +``` Input tensor with shape: [2, 3, 4] end: -1 Output: [2, 3] +``` +``` Input tensor with shape: [2, 3, 4] start: 1 end: 2 Output: [3] +```
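In NumPy terms, the Shape examples above amount to slicing the shape list (illustrative only):

```python
import numpy as np

x = np.zeros((2, 3, 4))
print(list(x.shape))       # [2, 3, 4]  no attributes
print(list(x.shape)[-1:])  # [4]        start: -1
print(list(x.shape)[:-1])  # [2, 3]     end: -1
print(list(x.shape)[1:2])  # [3]        start: 1, end: 2
```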

Python version: `onnx_ops.shape(data, end, start)` @@ -3149,27 +3100,34 @@ For slicing to the end of a dimension with unknown size, it is recommended to pass in `INT_MAX` when slicing forward and `INT_MIN` when slicing backward. Example 1: - data = [ - [1, 2, 3, 4], - [5, 6, 7, 8], - ] - axes = [0, 1] - starts = [1, 0] - ends = [2, 3] - steps = [1, 2] - result = [ - [5, 7], - ] + +``` +data = [ + [1, 2, 3, 4], + [5, 6, 7, 8], +] +axes = [0, 1] +starts = [1, 0] +ends = [2, 3] +steps = [1, 2] +result = [ + [5, 7], +] +``` + Example 2: - data = [ - [1, 2, 3, 4], - [5, 6, 7, 8], - ] - starts = [0, 1] - ends = [-1, 1000] - result = [ - [2, 3, 4], - ] + +``` +data = [ + [1, 2, 3, 4], + [5, 6, 7, 8], +] +starts = [0, 1] +ends = [-1, 1000] +result = [ + [2, 3, 4], +] +```
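The two examples map directly onto NumPy basic slicing (illustrative only):

```python
import numpy as np

data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
print(data[1:2, 0:3:2])    # Example 1: [[5 7]]
print(data[0:-1, 1:1000])  # Example 2: [[2 3 4]]
```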

Python version: `onnx_ops.slice(data, starts, ends, axes, steps)` @@ -3202,29 +3160,38 @@ If the input is N-D tensor with shape (N, C, D1, D2, ..., Dk), the loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L. After L is available, this operator can optionally apply a reduction. -shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk), - with K >= 1 in case of K-dimensional loss. -shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk), - with K >= 1 in case of K-dimensional loss. +* shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk), + with K >= 1 in case of K-dimensional loss. +* shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk), + with K >= 1 in case of K-dimensional loss. The loss for one sample, l_i, can be calculated as follows: - l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes. +``` +l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes. +``` or - l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided. +``` +l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided. +``` loss is zero for the case when label-value equals ignore_index. - l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index +``` +l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index +``` where: - p = Softmax(scores) - y = Log(p) - c = labels[i][d1][d2]...[dk] +``` +p = Softmax(scores) +y = Log(p) +c = labels[i][d1][d2]...[dk] +``` Finally, L is optionally reduced: -If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk). -If reduction = 'sum', the output is scalar: Sum(L). -If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W), -where tensor W is of shape (N, D1, D2, ..., Dk) and W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]. + +* If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk). +* If reduction = 'sum', the output is scalar: Sum(L). +* If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: `ReduceSum(L) / ReduceSum(W)`, + where tensor W is of shape `(N, D1, D2, ..., Dk)` and `W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]`.
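For illustration, a minimal NumPy sketch of the (N, C) case with 'mean' reduction and no weights (not the `onnx_ops` implementation):

```python
import numpy as np

def softmax_cross_entropy_loss(scores, labels):
    # numerically stable log-softmax: y = Log(Softmax(scores))
    shifted = scores - scores.max(axis=1, keepdims=True)
    log_p = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # l[i] = -y[i][c] with c = labels[i], then 'mean' reduction
    return -log_p[np.arange(len(labels)), labels].mean()

scores = np.array([[2.0, 1.0, 0.1], [0.5, 2.5, 0.3]])
print(softmax_cross_entropy_loss(scores, np.array([0, 1])))
```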

Python version: `onnx_ops.softmaxcrossentropyloss(scores, labels, weights, ignore_index, reduction)` @@ -3277,16 +3244,19 @@ Python version: `onnx_ops.split(input, split, axis)` ## SplitToSequence -

Split a tensor into a sequence of tensors, along the specified -'axis'. Lengths of the parts can be specified using argument 'split'. +

+Split a tensor into a sequence of tensors, along the specified 'axis'. +Lengths of the parts can be specified using the optional argument 'split'. +If the argument 'split' is not specified, a default scalar value of 1 +is used as the value of 'split'. 'split' must contain only positive numbers. 'split' is either a scalar (tensor of empty shape), or a 1-D tensor. -If 'split' is a scalar, then 'input' will be split into equally sized chunks(if possible). -Last chunk will be smaller if the 'input' size along the given axis 'axis' is not divisible -by 'split'. -Otherwise, the tensor is split into 'size(split)' chunks, with lengths of the parts on 'axis' -specified in 'split'. In this scenario, the sum of entries in 'split' must be equal to the -dimension size of input tensor on 'axis'. +If 'split' is a scalar, then 'input' will be split into chunks all of size 'split' +if possible. The last chunk alone may be smaller than 'split' if the 'input' size +along the given axis 'axis' is not divisible by 'split'. +If 'split' is a 1-dimensional tensor, the input tensor is split into 'size(split)' chunks, +with lengths of the parts on 'axis' specified in 'split'. In this scenario, the sum of entries +in 'split' must be equal to the dimension size of input tensor on 'axis'.
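For illustration, the scalar-'split' case behaves like fixed-size chunking in NumPy (a sketch, not the `onnx_ops` implementation):

```python
import numpy as np

x = np.arange(7)
split = 3
chunks = [x[i:i + split] for i in range(0, len(x), split)]
print(chunks)  # [array([0, 1, 2]), array([3, 4, 5]), array([6])], last chunk is smaller
```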

Python version: `onnx_ops.splittosequence(input, split, axis, keepdims)` @@ -3446,18 +3416,19 @@ Python version: `onnx_ops.tile(input, repeats)`

Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of shape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs: - -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] - which contains the values of the top k elements along the specified axis - -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which - contains the indices of the top k elements (original indices from the input - tensor). -If "largest" is 1 (the default value) then the k largest elements are returned. -If "sorted" is 1 (the default value) then the resulting k elements will be sorted. -If "sorted" is 0, order of returned 'Values' and 'Indices' are undefined. +* Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] + which contains the values of the top k elements along the specified axis +* Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which + contains the indices of the top k elements (original indices from the input + tensor). + +* If "largest" is 1 (the default value) then the k largest elements are returned. +* If "sorted" is 1 (the default value) then the resulting k elements will be sorted. +* If "sorted" is 0, the order of the returned 'Values' and 'Indices' is undefined. Given two equivalent values, this operator uses the indices along the axis as - a tiebreaker. That is, the element with the lower index will appear first. +a tiebreaker. That is, the element with the lower index will appear first.
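A NumPy sketch of the 1-D case with largest=1 and sorted=1 (illustrative only; a stable sort gives the lower-index tiebreak described above):

```python
import numpy as np

def topk(x, k):
    # stable sort on negated values: largest first, lower index wins ties
    idx = np.argsort(-x, kind="stable")[:k]
    return x[idx], idx

values, indices = topk(np.array([1, 3, 3, 2]), k=2)
print(values, indices)  # [3 3] [1 2]
```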

Python version: `onnx_ops.topk(X, K, axis, largest, sorted)` @@ -3514,67 +3485,89 @@ Outputs are either sorted in ascending order or optionally in the order of the f https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html Example 1: - input_X = [2, 1, 1, 3, 4, 3] - attribute_sorted = 0 - attribute_axis = None - output_Y = [2, 1, 3, 4] - output_indices = [0, 1, 3, 4] - output_inverse_indices = [0, 1, 1, 2, 3, 2] - output_counts = [1, 2, 2, 1] +``` +input_X = [2, 1, 1, 3, 4, 3] +attribute_sorted = 0 +attribute_axis = None +output_Y = [2, 1, 3, 4] +output_indices = [0, 1, 3, 4] +output_inverse_indices = [0, 1, 1, 2, 3, 2] +output_counts = [1, 2, 2, 1] +``` Example 2: - input_X = [[1, 3], [2, 3]] - attribute_sorted = 1 - attribute_axis = None - output_Y = [1, 2, 3] - output_indices = [0, 2, 1] - output_inverse_indices = [0, 2, 1, 2] - output_counts = [1, 1, 2] +``` +input_X = [[1, 3], [2, 3]] +attribute_sorted = 1 +attribute_axis = None +output_Y = [1, 2, 3] +output_indices = [0, 2, 1] +output_inverse_indices = [0, 2, 1, 2] +output_counts = [1, 1, 2] +``` Example 3: - input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]] - attribute_sorted = 1 - attribute_axis = 0 - output_Y = [[1, 0, 0], [2, 3, 4]] - output_indices = [0, 2] - output_inverse_indices = [0, 0, 1] - output_counts = [2, 1] +``` +input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]] +attribute_sorted = 1 +attribute_axis = 0 +output_Y = [[1, 0, 0], [2, 3, 4]] +output_indices = [0, 2] +output_inverse_indices = [0, 0, 1] +output_counts = [2, 1] +``` Example 4: - input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]], - [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]] - attribute_sorted = 1 - attribute_axis = 1 - - intermediate data are presented below for better understanding: +``` +input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]], + [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]] +attribute_sorted = 1 +attribute_axis = 1 +``` - there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)): - A: [[1, 1], [1, 1]], - [[0, 1], [0, 1]], - [[2, 1], [2, 1]], - [[0, 1], [0, 1]]. +intermediate data are presented below for better understanding: +there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)): +``` +A: [[1, 1], [1, 1]], + [[0, 1], [0, 1]], + [[2, 1], [2, 1]], + [[0, 1], [0, 1]]. +``` - there are 3 unique subtensors: - [[1, 1], [1, 1]], - [[0, 1], [0, 1]], - [[2, 1], [2, 1]]. +there are 3 unique subtensors: +``` +[[1, 1], [1, 1]], +[[0, 1], [0, 1]], +[[2, 1], [2, 1]]. +``` - sorted unique subtensors: - B: [[0, 1], [0, 1]], - [[1, 1], [1, 1]], - [[2, 1], [2, 1]]. +sorted unique subtensors: +``` +B: [[0, 1], [0, 1]], + [[1, 1], [1, 1]], + [[2, 1], [2, 1]]. +``` - output_Y is constructed from B: - [[[0. 1.], [1. 1.], [2. 1.]], - [[0. 1.], [1. 1.], [2. 1.]]] +output_Y is constructed from B: +``` +[[[0. 1.], [1. 1.], [2. 1.]], + [[0. 1.], [1. 1.], [2. 1.]]] +``` - output_indices is to map from B to A: - [1, 0, 2] +output_indices is to map from B to A: +``` +[1, 0, 2] +``` - output_inverse_indices is to map from A to B: - [1, 0, 2, 0] +output_inverse_indices is to map from A to B: +``` +[1, 0, 2, 0] +``` - output_counts = [2 1 1] +output_counts: +``` +[2, 1, 1] +```
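These outputs line up with NumPy's `np.unique` with all return flags enabled, e.g. for Example 2 above (flattening first, since `axis` is None; note that the shape of `return_inverse` for multi-dimensional input changed in NumPy 2.0):

```python
import numpy as np

X = np.array([[1, 3], [2, 3]])
Y, indices, inverse, counts = np.unique(
    X.ravel(), return_index=True, return_inverse=True, return_counts=True)
print(Y, indices, inverse, counts)
# [1 2 3] [0 2 1] [0 2 1 2] [1 1 2]
```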

Python version: `onnx_ops.unique(X, axis, sorted)` @@ -3587,15 +3580,13 @@ Python version: `onnx_ops.unique(X, axis, sorted)` Insert single-dimensional entries to the shape of an input tensor (`data`). Takes one required input `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`). -For example: - Given an input tensor (`data`) of shape [3, 4, 5], then - Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1]. +For example, given an input tensor (`data`) of shape [3, 4, 5], then +Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1]. The input `axes` should not contain any duplicate entries. It is an error if it contains duplicates. The rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`. Each value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1]. The order of values in `axes` does not matter and can come in any order. -
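The example corresponds to `np.expand_dims` with a tuple of axes (supported since NumPy 1.18; illustrative only):

```python
import numpy as np

data = np.zeros((3, 4, 5))
expanded = np.expand_dims(data, axis=(0, 4))
print(expanded.shape)  # (1, 3, 4, 5, 1)
```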

Python version: `onnx_ops.unsqueeze(data, axes)` @@ -3644,25 +3635,25 @@ Python version: `onnx_ops.xor(A, B)` ## pattern_matching_function

Returns the productions that match the given goal and retrieval buffers.

-

pattern_matching_function(productions, goal, retrieval) = pattern_matching_function(productions,goal,retrieval)

+

pattern_matching_function(productions, goal, retrieval) = actr.pattern_matching_function(productions,goal,retrieval)

-Python version: `pattern_matching_function(productions,goal,retrieval)` +Python version: `actr.pattern_matching_function(productions,goal,retrieval)` ## pattern_to_string

Converts a pattern dictionary to a string format.

-

pattern_to_string(chunk) = pattern_to_string(chunk)

+

pattern_to_string(chunk) = actr.pattern_to_string(chunk)

-Python version: `pattern_to_string(chunk)` +Python version: `actr.pattern_to_string(chunk)` ## retrieve_chunk

Retrieve a chunk from declarative memory given a pattern.

-

retrieve_chunk(pattern, dm_chunks, types) = retrieve_chunk(pattern,dm_chunks,types)

+

retrieve_chunk(pattern, dm_chunks, types) = actr.retrieve_chunk(pattern,dm_chunks,types)

-Python version: `retrieve_chunk(pattern,dm_chunks,types)` +Python version: `actr.retrieve_chunk(pattern,dm_chunks,types)` @@ -3700,22 +3691,22 @@ Python version: `scale * numpy.tanh(variable0)` ## update_buffer

Returns a pattern to update the given buffer with.

-

update_buffer(production, buffer) = update_buffer(production,buffer)

+

update_buffer(production, buffer) = actr.update_buffer(production,buffer)

-Python version: `update_buffer(production,buffer)` +Python version: `actr.update_buffer(production,buffer)` ## update_goal

Returns a pattern to update the goal buffer with.

-

update_goal(production) = update_goal(production)

+

update_goal(production) = actr.update_goal(production)

-Python version: `update_goal(production)` +Python version: `actr.update_goal(production)` ## update_retrieval

Returns a pattern to update the retrieval buffer with.

-

update_retrieval(production) = update_retrieval(production)

+

update_retrieval(production) = actr.update_retrieval(production)

-Python version: `update_retrieval(production)` +Python version: `actr.update_retrieval(production)` diff --git a/docs/MDF_function_specifications.yaml b/docs/MDF_function_specifications.yaml index e9729b427..2bb9bf086 100644 --- a/docs/MDF_function_specifications.yaml +++ b/docs/MDF_function_specifications.yaml @@ -14,17 +14,17 @@ change_goal: arguments: - pattern - curr_goal - expression_string: change_goal(pattern,curr_goal) + expression_string: actr.change_goal(pattern,curr_goal) check_termination: description: Function used to check if no production was selected. arguments: - production - expression_string: check_termination(production) + expression_string: actr.check_termination(production) chunk_to_string: description: Converts a chunk dictionary to a string format. arguments: - chunk - expression_string: chunk_to_string(chunk) + expression_string: actr.chunk_to_string(chunk) conflict_resolution_function: description: 'ACT-R conflict resolution function. Currently selects a production at random from the already matched productions, since utility values and learning @@ -32,7 +32,7 @@ conflict_resolution_function: are not implemented yet.' arguments: - productions - expression_string: conflict_resolution_function(productions) + expression_string: actr.conflict_resolution_function(productions) cos: description: Cosine function arguments: @@ -58,7 +58,7 @@ drift_diffusion_integrator: - threshold - noise - dt - expression_string: drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt) + expression_string: ddm.drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt) exponential: description: Exponential function arguments: @@ -89,7 +89,7 @@ match_production: arguments: - production - context - expression_string: match_production(production,context) + expression_string: actr.match_production(production,context) onnx::Abs: description: ' @@ -249,20 +249,20 @@ onnx::AveragePool: \ subset of the input tensor according to the kernel size and downsampling\ \ the\n data into the output tensor Y for further processing. The output spatial\ \ shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i]\ - \ + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n\ - \ ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i]\ - \ + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n\ - \ ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of pads along\ - \ axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. 
If you are using\ - \ them currently, the output spatial shape will be following:\n ```\n VALID:\ - \ output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i]\ - \ + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i]\ - \ = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad\ - \ shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i]\ - \ = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i]\ - \ - input_spatial_shape[i]\n ```\n The output of each pooling window is divided\ - \ by the number of elements (exclude pad when attribute count_include_pad\ - \ is zero).\n " + \ + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]\ + \ + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i]\ + \ + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]\ + \ + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of\ + \ pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you\ + \ are using them currently, the output spatial shape will be following:\n\ + \ ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i]\ + \ - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or\ + \ SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n\ + \ ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n\ + \ ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i]\ + \ + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n\ + \ ```\n The output of each pooling window is divided by the number of elements\ + \ (exclude pad when attribute count_include_pad is zero).\n " arguments: - X expression_string: onnx_ops.averagepool(X, auto_pad, ceil_mode, count_include_pad, @@ -287,9 +287,9 @@ onnx::BatchNormalization: There are multiple cases for the number of outputs, which we list below: - Output case #1: Y, running_mean, running_var (training_mode=True) + * Output case #1: Y, running_mean, running_var (training_mode=True) - Output case #2: Y (training_mode=False) + * Output case #2: Y (training_mode=False) When training_mode=False, extra outputs are invalid. @@ -305,24 +305,24 @@ onnx::BatchNormalization: Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B + ``` where: + ``` current_mean = ReduceMean(X, axis=all_except_channel_index) current_var = ReduceVar(X, axis=all_except_channel_index) + ``` - Notice that ReduceVar refers to the population variance, and it equals to - - sum(sqrd(x_i - x_avg)) / N - - where N is the population size (this formula does not use sample size N - - 1). + Notice that `ReduceVar` refers to the population variance, and it equals to + `sum(sqrd(x_i - x_avg)) / N` - ``` + where `N` is the population size (this formula does not use sample size `N + - 1`). The computation of ReduceMean and ReduceVar uses float to avoid overflow for @@ -381,19 +381,39 @@ onnx::Bernoulli: - input expression_string: onnx_ops.bernoulli(input, dtype, seed) onnx::BitShift: - description: "\nBitwise shift operator performs element-wise operation. For each\ - \ input element, if the\n attribute \"direction\" is \"RIGHT\", this operator\ - \ moves its binary representation toward\n the right side so that the input\ - \ value is effectively decreased. 
If the attribute \"direction\"\n is \"LEFT\"\ - , bits of binary representation moves toward the left side, which results\ - \ the\n increase of its actual value. The input X is the tensor to be shifted\ - \ and another input\n Y specifies the amounts of shifting. For example, if\ - \ \"direction\" is \"Right\", X is [1, 4],\n and S is [1, 1], the corresponding\ - \ output Z would be [0, 2]. If \"direction\" is \"LEFT\" with\n X=[1, 2] and\ - \ S=[1, 2], the corresponding output Y would be [2, 8].\n\n Because this operator\ - \ supports Numpy-style broadcasting, X's and Y's shapes are\n not necessarily\ - \ identical.\nThis operator supports **multidirectional (i.e., Numpy-style)\ - \ broadcasting**; for more details please check [the doc](Broadcasting.md)." + description: ' + + Bitwise shift operator performs element-wise operation. For each input element, + if the + + attribute "direction" is "RIGHT", this operator moves its binary representation + toward + + the right side so that the input value is effectively decreased. If the attribute + "direction" + + is "LEFT", bits of binary representation moves toward the left side, which + results the + + increase of its actual value. The input X is the tensor to be shifted and + another input + + Y specifies the amounts of shifting. For example, if "direction" is "Right", + X is [1, 4], + + and S is [1, 1], the corresponding output Z would be [0, 2]. If "direction" + is "LEFT" with + + X=[1, 2] and S=[1, 2], the corresponding output Y would be [2, 8]. + + + Because this operator supports Numpy-style broadcasting, X''s and Y''s shapes + are + + not necessarily identical. + + This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; + for more details please check [the doc](Broadcasting.md).' arguments: - X - Y @@ -406,7 +426,7 @@ onnx::Cast: \nCasting from string tensor in plain (e.g., \"3.14\" and \"1000\") and scientific\ \ numeric representations\n(e.g., \"1e-5\" and \"1E8\") to float types is\ \ supported. For example, converting string \"100.5\" to an integer may\n\ - result 100. There are some string literals reserved for special floating-point\ + yield result 100. There are some string literals reserved for special floating-point\ \ values;\n\"+INF\" (and \"INF\"), \"-INF\", and \"NaN\" are positive infinity,\ \ negative infinity, and not-a-number, respectively.\nAny string which can\ \ exactly match \"+INF\" in a case-insensitive way would be mapped to positive\ @@ -427,8 +447,8 @@ onnx::Cast: \ True.\n* Casting from fixed point to:\n * floating point: +/- infinity\ \ if OOR. (+ infinity in the case of uint)\n * fixed point: when OOR, discard\ \ higher bits and reinterpret (with respect to two's complement representation\ - \ for\nsigned types). For example, 200 (int16) -> -56 (int8).\n * bool: zero\ - \ to False; nonzero to True.\n* Casting from bool to:\n * floating point:\ + \ for\n signed types). For example, 200 (int16) -> -56 (int8).\n * bool:\ + \ zero to False; nonzero to True.\n* Casting from bool to:\n * floating point:\ \ `{1.0, 0.0}`.\n * fixed point: `{1, 0}`.\n * bool: no change.\n" arguments: - input @@ -455,7 +475,8 @@ onnx::Ceil: (Tensor) where the ceil is, y = ceil(x), is applied to - the tensor elementwise. + the tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself + is returned. 
' arguments: @@ -648,17 +669,17 @@ onnx::DepthToSpace: the input x as below: - b, c, h, w = x.shape + ``` + b, c, h, w = x.shape tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w]) - tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2]) - y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize]) + ``` In the CRD mode, elements along the depth dimension from the input tensor @@ -668,17 +689,17 @@ onnx::DepthToSpace: the input x as below: - b, c, h, w = x.shape + ``` + b, c, h, w = x.shape tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w]) - tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3]) - y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize]) + ``` ' arguments: @@ -690,13 +711,13 @@ onnx::DequantizeLinear: The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor. - The dequantization formula is y = (x - x_zero_point) * x_scale. ''x_scale'' - and ''x_zero_point'' must have same shape, and can be either a scalar + The dequantization formula is `y = (x - x_zero_point) * x_scale`. `x_scale` + and `x_zero_point` must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. - ''x_zero_point'' and ''x'' must have same type. ''x'' and ''y'' must have - same shape. In the case of dequantizing int32, + `x_zero_point` and `x` must have same type. `x` and `y` must have same shape. + In the case of dequantizing int32, there''s no zero point (zero point is supposed to be 0). @@ -785,31 +806,78 @@ onnx::Dropout: - training_mode expression_string: onnx_ops.dropout(data, ratio, training_mode, seed) onnx::DynamicQuantizeLinear: - description: "\nA Function to fuse calculation for Scale, Zero Point and FP32->8Bit\ - \ convertion of FP32 Input data.\nOutputs Scale, ZeroPoint and Quantized Input\ - \ for a given FP32 Input.\nScale is calculated as:\n```\n y_scale = (max(x)\ - \ - min(x))/(qmax - qmin)\n * where qmax and qmin are max and min values for\ - \ quantization range .i.e [0, 255] in case of uint8\n * data range is adjusted\ - \ to include 0.\n```\nZero point is calculated as:\n```\nintermediate_zero_point\ - \ = qmin - min(x)/y_scale\ny_zero_point = cast(round(saturate(itermediate_zero_point)))\n\ - * where qmax and qmin are max and min values for quantization range .i.e [0,\ - \ 255] in case of uint8\n* for saturation, it saturates to [0, 255] if it's\ - \ uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n\ - * rounding to nearest ties to even.\n```\nData quantization formula is:\n\ - ```\ny = saturate (round (x / y_scale) + y_zero_point)\n* for saturation,\ - \ it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right\ - \ now only uint8 is supported.\n* rounding to nearest ties to even.\n```\n" + description: ' + + A Function to fuse calculation for Scale, Zero Point and FP32->8Bit convertion + of FP32 Input data. + + Outputs Scale, ZeroPoint and Quantized Input for a given FP32 Input. + + Scale is calculated as: + + ``` + + y_scale = (max(x) - min(x))/(qmax - qmin) + + ``` + + + * where qmax and qmin are max and min values for quantization range .i.e [0, + 255] in case of uint8 + + * data range is adjusted to include 0. 
+ + + Zero point is calculated as: + + ``` + + intermediate_zero_point = qmin - min(x)/y_scale + + y_zero_point = cast(round(saturate(itermediate_zero_point))) + + ``` + + + * where qmax and qmin are max and min values for quantization range .i.e [0, + 255] in case of uint8 + + * for saturation, it saturates to [0, 255] if it''s uint8, or [-127, 127] + if it''s int8. Right now only uint8 is supported. + + * rounding to nearest ties to even. + + + Data quantization formula is: + + ``` + + y = saturate (round (x / y_scale) + y_zero_point) + + ``` + + + * for saturation, it saturates to [0, 255] if it''s uint8, or [-127, 127] + if it''s int8. Right now only uint8 is supported. + + * rounding to nearest ties to even. + + ' arguments: - x expression_string: onnx_ops.dynamicquantizelinear(x) onnx::Einsum: description: ' - An einsum of the form ```term1, term2 -> output-term``` produces an output - tensor using the following equation + An einsum of the form `term1, term2 -> output-term` produces an output tensor + using the following equation - ```output[output-term] = reduce-sum( input1[term1] * input2[term] )``` + ``` + + output[output-term] = reduce-sum( input1[term1] * input2[term] ) + + ``` where the reduce-sum performs a summation over all the indices occurring in @@ -985,7 +1053,8 @@ onnx::Floor: (Tensor) where the floor is, y = floor(x), is applied to - the tensor elementwise. + the tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself + is returned. ' arguments: @@ -993,36 +1062,35 @@ onnx::Floor: expression_string: onnx_ops.floor(X) onnx::GRU: description: "\nComputes an one-layer GRU. This operator is usually supported\ - \ via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n`X` - input\ - \ tensor\n\n`z` - update gate\n\n`r` - reset gate\n\n`h` - hidden gate\n\n\ - `t` - time step (t-1 means previous time step)\n\n`W[zrh]` - W parameter weight\ - \ matrix for update, reset, and hidden gates\n\n`R[zrh]` - R recurrence weight\ - \ matrix for update, reset, and hidden gates\n\n`Wb[zrh]` - W bias vectors\ - \ for update, reset, and hidden gates\n\n`Rb[zrh]` - R bias vectors for update,\ - \ reset, and hidden gates\n\n`WB[zrh]` - W parameter weight matrix for backward\ - \ update, reset, and hidden gates\n\n`RB[zrh]` - R recurrence weight matrix\ - \ for backward update, reset, and hidden gates\n\n`WBb[zrh]` - W bias vectors\ - \ for backward update, reset, and hidden gates\n\n`RBb[zrh]` - R bias vectors\ - \ for backward update, reset, and hidden gates\n\n`H` - Hidden state\n\n`num_directions`\ - \ - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x)\ - \ - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1\ - \ + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below\ - \ are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x)\ - \ - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if\ - \ x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n \ - \ HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) \ - \ - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) \ - \ - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations\ - \ (Default: f=Sigmoid, g=Tanh):\n\n - zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz\ - \ + Rbz)\n\n - rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)\n\n - ht = g(Xt*(Wh^T)\ + \ via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n* `X` -\ + \ input tensor\n* `z` - update gate\n* `r` - reset gate\n* `h` - hidden gate\n\ + * `t` - time step (t-1 means previous time step)\n* `W[zrh]` - W parameter\ + \ 
weight matrix for update, reset, and hidden gates\n* `R[zrh]` - R recurrence\ + \ weight matrix for update, reset, and hidden gates\n* `Wb[zrh]` - W bias\ + \ vectors for update, reset, and hidden gates\n* `Rb[zrh]` - R bias vectors\ + \ for update, reset, and hidden gates\n* `WB[zrh]` - W parameter weight matrix\ + \ for backward update, reset, and hidden gates\n* `RB[zrh]` - R recurrence\ + \ weight matrix for backward update, reset, and hidden gates\n* `WBb[zrh]`\ + \ - W bias vectors for backward update, reset, and hidden gates\n* `RBb[zrh]`\ + \ - R bias vectors for backward update, reset, and hidden gates\n* `H` - Hidden\ + \ state\n* `num_directions` - 2 if direction == bidirectional else 1\n\nActivation\ + \ functions:\n\n* Relu(x) - max(0, x)\n* Tanh(x) \ + \ - (1 - e^{-2x})/(1 + e^{-2x})\n* Sigmoid(x) - 1/(1 + e^{-x})\n\ + \nNOTE:\n Below are optional\n\n* Affine(x) - alpha * x + beta\n\ + * LeakyRelu(x) - x if x >= 0 else alpha * x\n* ThresholdedRelu(x)\ + \ - x if x >= alpha else 0\n* ScaledTanh(x) - alpha * Tanh(beta\ + \ * x)\n* HardSigmoid(x) - min(max(alpha * x + beta, 0), 1)\n* Elu(x)\ + \ - x if x >= 0 else alpha * (e^x - 1)\n* Softsign(x) \ + \ - x/(1 + |x|)\n* Softplus(x) - log(1 + e^x)\n\nEquations\ + \ (Default: f=Sigmoid, g=Tanh):\n\n* zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz\ + \ + Rbz)\n* rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)\n* ht = g(Xt*(Wh^T)\ \ + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset\ - \ = 0\n\n - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when\ - \ linear_before_reset != 0\n\n - Ht = (1 - zt) (.) ht + zt (.) Ht-1\nThis\ - \ operator has **optional** inputs/outputs. See [the doc](IR.md) for more\ - \ details about the representation of optional arguments. An empty string\ - \ may be used in the place of an actual argument's name to indicate a missing\ - \ argument. Trailing optional arguments (those not followed by an argument\ - \ that is present) may also be simply omitted.\n" + \ = 0\n* ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset\ + \ != 0\n* Ht = (1 - zt) (.) ht + zt (.) Ht-1\nThis operator has **optional**\ + \ inputs/outputs. See [the doc](IR.md) for more details about the representation\ + \ of optional arguments. An empty string may be used in the place of an actual\ + \ argument's name to indicate a missing argument. 
Trailing optional arguments\ + \ (those not followed by an argument that is present) may also be simply omitted.\n" arguments: - X - W @@ -1036,18 +1104,17 @@ onnx::Gather: description: "\nGiven `data` tensor of rank r >= 1, and `indices` tensor of rank\ \ q, gather\nentries of the axis dimension of `data` (by default outer-most\ \ one as axis=0) indexed by `indices`, and concatenates\nthem in an output\ - \ tensor of rank q + (r - 1).\n\naxis = 0 :\n\nLet\nk = indices[i_{0}, ...,\ - \ i_{q-1}]\nThen\noutput[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k\ - \ , j_{0}, ..., j_{r-2}]\n\n```\n data = [\n [1.0, 1.2],\n [2.3,\ - \ 3.4],\n [4.5, 5.7],\n ]\n indices = [\n [0, 1],\n [1, 2],\n\ - \ ]\n output = [\n [\n [1.0, 1.2],\n [2.3, 3.4],\n\ - \ ],\n [\n [2.3, 3.4],\n [4.5, 5.7],\n ],\n\ - \ ]\n```\naxis = 1 :\n\nLet\nk = indices[i_{0}, ..., i_{q-1}]\nThen\noutput[j_{0},\ - \ i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ...,\ - \ j_{r-2}]\n\n```\n data = [\n [1.0, 1.2, 1.9],\n [2.3, 3.4, 3.9],\n\ - \ [4.5, 5.7, 5.9],\n ]\n indices = [\n [0, 2],\n ]\n axis =\ - \ 1,\n output = [\n [[1.0, 1.9]],\n [[2.3, 3.9]],\n \ - \ [[4.5, 5.9]],\n ]\n```\n" + \ tensor of rank q + (r - 1).\n\nIf `axis = 0`, let `k = indices[i_{0}, ...,\ + \ i_{q-1}]`\nthen `output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k\ + \ , j_{0}, ..., j_{r-2}]`:\n\n```\ndata = [\n [1.0, 1.2],\n [2.3, 3.4],\n\ + \ [4.5, 5.7],\n]\nindices = [\n [0, 1],\n [1, 2],\n]\noutput = [\n\ + \ [\n [1.0, 1.2],\n [2.3, 3.4],\n ],\n [\n [2.3,\ + \ 3.4],\n [4.5, 5.7],\n ],\n]\n```\n\nIf `axis = 1`, let `k = indices[i_{0},\ + \ ..., i_{q-1}]`\nthen `output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}]\ + \ = input[j_{0}, k, j_{1}, ..., j_{r-2}]`:\n\n```\ndata = [\n [1.0, 1.2,\ + \ 1.9],\n [2.3, 3.4, 3.9],\n [4.5, 5.7, 5.9],\n]\nindices = [\n [0,\ + \ 2],\n]\naxis = 1,\noutput = [\n [[1.0, 1.9]],\n [[2.3, 3.9]],\n\ + \ [[4.5, 5.9]],\n]\n```\n" arguments: - data - indices @@ -1061,16 +1128,15 @@ onnx::GatherElements: \ output shape is the same as the shape of `indices` and consists of one value\n\ (gathered from the `data`) for each element in `indices`.\n\nFor instance,\ \ in the 3-D case (r = 3), the output produced is determined\nby the following\ - \ equations:\n```\n out[i][j][k] = input[index[i][j][k]][j][k] if axis =\ - \ 0,\n out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,\n out[i][j][k]\ - \ = input[i][j][index[i][j][k]] if axis = 2,\n```\n\nThis operator is also\ - \ the inverse of ScatterElements. 
It is similar to Torch's gather operation.\n\ - \nExample 1:\n```\n data = [\n [1, 2],\n [3, 4],\n ]\n indices\ - \ = [\n [0, 0],\n [1, 0],\n ]\n axis = 1\n output = [\n \ - \ [1, 1],\n [4, 3],\n ]\n```\nExample 2:\n```\n data = [\n [1,\ - \ 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n ]\n indices = [\n [1,\ - \ 2, 0],\n [2, 0, 0],\n ]\n axis = 0\n output = [\n [4, 8, 3],\n\ - \ [7, 2, 3],\n ]\n```\n" + \ equations:\n```\nout[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,\n\ + out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,\nout[i][j][k] = input[i][j][index[i][j][k]]\ + \ if axis = 2,\n```\n\nThis operator is also the inverse of ScatterElements.\ + \ It is similar to Torch's gather operation.\n\nExample 1:\n```\ndata = [\n\ + \ [1, 2],\n [3, 4],\n]\nindices = [\n [0, 0],\n [1, 0],\n]\naxis\ + \ = 1\noutput = [\n [1, 1],\n [4, 3],\n]\n```\nExample 2:\n```\ndata\ + \ = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n]\nindices = [\n \ + \ [1, 2, 0],\n [2, 0, 0],\n]\naxis = 0\noutput = [\n [4, 8, 3],\n \ + \ [7, 2, 3],\n]\n```\n" arguments: - data - indices @@ -1135,10 +1201,9 @@ onnx::Gemm: https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 - A'' = transpose(A) if transA else A + * A'' = transpose(A) if transA else A - - B'' = transpose(B) if transB else B + * B'' = transpose(B) if transB else B Compute Y = alpha * A'' * B'' + beta * C, where input tensor A has shape (M, @@ -1302,63 +1367,135 @@ onnx::LRN: It normalizes over local input regions. - The local region is defined across the channels. For an element X[n, c, d1, - ..., dk] in a tensor + The local region is defined across the channels. For an element `X[n, c, d1, + ..., dk]` in a tensor - of shape (N x C x D1 x D2, ..., Dk), its region is + of shape `(N x C x D1 x D2, ..., Dk)`, its region is - {X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - - 1, c + ceil((size - 1) / 2))}. + `{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - + 1, c + ceil((size - 1) / 2))}`. - square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2), + `square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2)`, - where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - - 1) / 2)). + where `max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size + - 1) / 2))`. - Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, - c, d1, ..., dk] ) ^ beta + `Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, + c, d1, ..., dk] ) ^ beta` ' arguments: - X expression_string: onnx_ops.lrn(X, alpha, beta, bias, size) onnx::LSTM: - description: "\nComputes an one-layer LSTM. 
This operator is usually supported\ - \ via some\ncustom implementation such as CuDNN.\n\nNotations:\n\n`X` - input\ - \ tensor\n\n`i` - input gate\n\n`o` - output gate\n\n`f` - forget gate\n\n\ - `c` - cell gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[iofc]`\ - \ - W parameter weight matrix for input, output, forget, and cell gates\n\n\ - `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell\ - \ gates\n\n`Wb[iofc]` - W bias vectors for input, output, forget, and cell\ - \ gates\n\n`Rb[iofc]` - R bias vectors for input, output, forget, and cell\ - \ gates\n\n`P[iof]` - P peephole weight vector for input, output, and forget\ - \ gates\n\n`WB[iofc]` - W parameter weight matrix for backward input, output,\ - \ forget, and cell gates\n\n`RB[iofc]` - R recurrence weight matrix for backward\ - \ input, output, forget, and cell gates\n\n`WBb[iofc]` - W bias vectors for\ - \ backward input, output, forget, and cell gates\n\n`RBb[iofc]` - R bias vectors\ - \ for backward input, output, forget, and cell gates\n\n`PB[iof]` - P peephole\ - \ weight vector for backward input, output, and forget gates\n\n`H` - Hidden\ - \ state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation\ - \ functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) \ - \ - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1\ - \ + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) -\ - \ alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\ - \n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) \ - \ - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x\ - \ + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x\ - \ - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) \ - \ - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh, h=Tanh):\n\n\ - \ - it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)\n\n - ft =\ - \ f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)\n\n - ct = g(Xt*(Wc^T)\ - \ + Ht-1*(Rc^T) + Wbc + Rbc)\n\n - Ct = ft (.) Ct-1 + it (.) ct\n\n - ot\ - \ = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)\n\n - Ht = ot (.)\ - \ h(Ct)\nThis operator has **optional** inputs/outputs. See [the doc](IR.md)\ - \ for more details about the representation of optional arguments. An empty\ - \ string may be used in the place of an actual argument's name to indicate\ - \ a missing argument. Trailing optional arguments (those not followed by an\ - \ argument that is present) may also be simply omitted.\n" + description: ' + + Computes an one-layer LSTM. This operator is usually supported via some + + custom implementation such as CuDNN. 
+ + + Notations: + + + * `X` - input tensor + + * `i` - input gate + + * `o` - output gate + + * `f` - forget gate + + * `c` - cell gate + + * `t` - time step (t-1 means previous time step) + + * `W[iofc]` - W parameter weight matrix for input, output, forget, and cell + gates + + * `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell + gates + + * `Wb[iofc]` - W bias vectors for input, output, forget, and cell gates + + * `Rb[iofc]` - R bias vectors for input, output, forget, and cell gates + + * `P[iof]` - P peephole weight vector for input, output, and forget gates + + * `WB[iofc]` - W parameter weight matrix for backward input, output, forget, + and cell gates + + * `RB[iofc]` - R recurrence weight matrix for backward input, output, forget, + and cell gates + + * `WBb[iofc]` - W bias vectors for backward input, output, forget, and cell + gates + + * `RBb[iofc]` - R bias vectors for backward input, output, forget, and cell + gates + + * `PB[iof]` - P peephole weight vector for backward input, output, and forget + gates + + * `H` - Hidden state + + * `num_directions` - 2 if direction == bidirectional else 1 + + + Activation functions: + + + * Relu(x) - max(0, x) + + * Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + * Sigmoid(x) - 1/(1 + e^{-x}) + + + NOTE: Below are optional + + + * Affine(x) - alpha*x + beta + + * LeakyRelu(x) - x if x >= 0 else alpha * x + + * ThresholdedRelu(x) - x if x >= alpha else 0 + + * ScaledTanh(x) - alpha*Tanh(beta*x) + + * HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + * Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + * Softsign(x) - x/(1 + |x|) + + * Softplus(x) - log(1 + e^x) + + + Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): + + + * it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) + + * ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) + + * ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) + + * Ct = ft (.) Ct-1 + it (.) ct + + * ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) + + * Ht = ot (.) h(Ct) + + This operator has **optional** inputs/outputs. See [the doc](IR.md) for more + details about the representation of optional arguments. An empty string may + be used in the place of an actual argument''s name to indicate a missing argument. + Trailing optional arguments (those not followed by an argument that is present) + may also be simply omitted. + + ' arguments: - X - W @@ -1505,17 +1642,16 @@ onnx::MaxPool: \ + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]\ \ + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i]\ \ + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]\ - \ + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of\ - \ pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. 
If you\ - \ are using them currently, the output spatial shape will be following:\n\ - \ ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i]\ - \ - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or\ - \ SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n\ - \ ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n\ - \ ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i]\ - \ + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n\ - \ ```\n The output of each pooling window is maximum number of elements exclude\ - \ pad. \n " + \ + 1)\n ```\n if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along\ + \ axis `i`.\n\n `auto_pad` is a DEPRECATED attribute. If you are using them\ + \ currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i]\ + \ = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i]\ + \ + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i]\ + \ = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad\ + \ shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i]\ + \ = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i]\ + \ - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of\ + \ each pooling window is the maximum of the elements (excluding pad). \n " arguments: - X expression_string: onnx_ops.maxpool(X, auto_pad, ceil_mode, dilations, kernel_shape,
``` (X-EX)/sqrt(E(X-EX)^2)\ - \ ```\n" + \ normalization\n on the input tensor X using formula: `(X-EX)/sqrt(E(X-EX)^2)`\n" arguments: - X expression_string: onnx_ops.meanvariancenormalization(X, axes) @@ -1594,17 +1729,17 @@ onnx::Min: expression_string: onnx_ops.min(data_0) onnx::Mod: description: "\n Performs element-wise binary modulus (with Numpy-style broadcasting\ - \ support).\n The sign of the remainder is the same as that of the Divisor.\n\ - \n Mod operator can also behave like C fmod() or numpy.fmod. In this case,\ - \ the sign of the remainder however, will be the same as the Dividend\n \ - \ (in contrast to integer mod). To force a behavior like numpy.fmod() an\ - \ 'fmod' Attribute is provided.\n This attribute is set to 0 by default\ - \ causing the behavior to be like integer mod.\n Setting this attribute\ - \ to 1 causes the remainder to be calculated similar to that of numpy.fmod().\n\ - \n If the input type is floating point, then `fmod` attribute must be set\ - \ to 1.\n\n In case of dividend being zero, the results will be platform\ - \ dependent.\n\n This operator supports **multidirectional (i.e., Numpy-style)\ - \ broadcasting**; for more details please check [the doc](Broadcasting.md).\n" + \ support).\n The sign of the remainder is the same as that of the Divisor.\n\ + \n Mod operator can also behave like C fmod() or numpy.fmod. In this case,\ + \ the sign of the remainder however, will be the same as the Dividend\n (in\ + \ contrast to integer mod). To force a behavior like numpy.fmod() an 'fmod'\ + \ Attribute is provided.\n This attribute is set to 0 by default causing\ + \ the behavior to be like integer mod.\n Setting this attribute to 1 causes\ + \ the remainder to be calculated similar to that of numpy.fmod().\n\n If\ + \ the input type is floating point, then `fmod` attribute must be set to 1.\n\ + \n In case of dividend being zero, the results will be platform dependent.\n\ + \n This operator supports **multidirectional (i.e., Numpy-style) broadcasting**;\ + \ for more details please check [the doc](Broadcasting.md).\n" arguments: - A - B @@ -1662,39 +1797,37 @@ onnx::NegativeLogLikelihoodLoss: \ class labels (one of C classes)\nor it may contain a special value (indicated\ \ by an attribute ignore_index) for N x d1 x d2 x ... 
x dk samples.\nThe loss\ \ value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k]\ - \ is computed as:\n\n loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].\n\ - \nWhen an optional \"weight\" is provided, the sample loss is calculated as:\n\ - \n loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].\n\ - \nloss is zero for the case when target-value equals ignore_index.\n\n \ - \ loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index\n\ - \nIf \"reduction\" attribute is set to \"none\", the operator's output will\ - \ be the above loss with shape (N, d1, d2, ..., dk).\nIf \"reduction\" attribute\ - \ is set to \"mean\" (the default attribute value), the output loss is (weight)\ - \ averaged:\n\n mean(loss), if \"weight\" is not provided,\n\nor if weight\ - \ is provided,\n\n sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]),\ - \ for all samples.\n\nIf \"reduction\" attribute is set to \"sum\", the output\ - \ is a scalar:\n sum(loss).\n\nSee also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.\n\ - \nExample 1:\n\n // negative log likelihood loss, \"none\" reduction\n\ - \ N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n\ - \ [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0,\ - \ 2]]\n\n loss = np.zeros((N, d1))\n for n in range(N):\n for\ - \ d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1]\ - \ = -input[n][c][d_1]\n\n // print(loss)\n // [[-3. -2.]\n // [-0.\ - \ -2.]]\n\nExample 2:\n\n // weighted negative log likelihood loss, sum\ - \ reduction\n N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0],\ - \ [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target\ - \ = [[2, 1], [0, 2]]\n weight = [0.2, 0.3, 0.1]\n loss = np.zeros((N,\ - \ d1))\n for n in range(N):\n for d_1 in range(d1):\n \ - \ c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n\ - \n loss = np.sum(loss)\n // print(loss)\n // -1.1\n\nExample 3:\n\ - \n // weighted negative log likelihood loss, mean reduction\n N, C,\ - \ d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n \ - \ [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0, 2]]\n\ - \ weight = [0.2, 0.3, 0.1]\n loss = np.zeros((N, d1))\n weight_total\ - \ = 0\n for n in range(N):\n for d_1 in range(d1):\n \ - \ c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n\ - \ weight_total = weight_total + weight[c]\n\n loss = np.sum(loss)\ - \ / weight_total\n // print(loss)\n // -1.57\n" + \ is computed as:\n\n```\nloss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].\n\ + ```\n\nWhen an optional \"weight\" is provided, the sample loss is calculated\ + \ as:\n\n```\nloss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] *\ + \ weight[c].\n```\n\nloss is zero for the case when target-value equals ignore_index.\n\ + \n```\nloss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index\n\ + ```\n\nIf \"reduction\" attribute is set to \"none\", the operator's output\ + \ will be the above loss with shape (N, d1, d2, ..., dk).\nIf \"reduction\"\ + \ attribute is set to \"mean\" (the default attribute value), the output loss\ + \ is (weight) averaged:\n\n```\nmean(loss), if \"weight\" is not provided,\n\ + ```\n\nor if weight is provided,\n\n```\nsum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]),\ + \ for all samples.\n```\n\nIf \"reduction\" attribute is set to \"sum\", the\ + \ output is a scalar: `sum(loss)`.\n\nSee also 
https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.\n\ + \nExample 1:\n\n```\n// negative log likelihood loss, \"none\" reduction\n\ + N, C, d1 = 2, 3, 2\ninput = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n \ + \ [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\ntarget = [[2, 1], [0, 2]]\n\nloss\ + \ = np.zeros((N, d1))\nfor n in range(N):\n for d_1 in range(d1):\n \ + \ c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1]\n\n// print(loss)\n\ + // [[-3. -2.]\n// [-0. -2.]]\n```\n\nExample 2:\n\n```\n// weighted negative\ + \ log likelihood loss, sum reduction\nN, C, d1 = 2, 3, 2\ninput = [[[1.0,\ + \ 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n\ + target = [[2, 1], [0, 2]]\nweight = [0.2, 0.3, 0.1]\nloss = np.zeros((N, d1))\n\ + for n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n\ + \ loss[n][d_1] = -input[n][c][d_1] * weight[c]\n\nloss = np.sum(loss)\n\ + // print(loss)\n// -1.1\n```\n\nExample 3:\n\n```\n// weighted negative log\ + \ likelihood loss, mean reduction\nN, C, d1 = 2, 3, 2\ninput = [[[1.0, 2.0],\ + \ [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n\ + target = [[2, 1], [0, 2]]\nweight = [0.2, 0.3, 0.1]\nloss = np.zeros((N, d1))\n\ + weight_total = 0\nfor n in range(N):\n for d_1 in range(d1):\n c\ + \ = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n\ + \ weight_total = weight_total + weight[c]\n\nloss = np.sum(loss) /\ + \ weight_total\n// print(loss)\n// -1.57\n```\n" arguments: - input - target @@ -1985,7 +2118,7 @@ onnx::QuantizeLinear: For saturation, it saturates to [0, 255] if it''s uint8, or [-128, 127] if it''s int8. - For (x / y_scale), it''s rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding + For (x / y_scale), it''s rounding to the nearest even. Refer to https://en.wikipedia.org/wiki/Rounding for details. ''y_zero_point'' and ''y'' must have same type. ' @@ -1995,30 +2128,85 @@ onnx::QuantizeLinear: - y_zero_point expression_string: onnx_ops.quantizelinear(x, y_scale, y_zero_point, axis) onnx::RNN: - description: "\nComputes an one-layer simple RNN. 
This operator is usually supported\n\ via some custom implementation such as CuDNN.\n\nNotations:\n\n`X` - input\ \ tensor\n\n`i` - input gate\n\n`t` - time step (t-1 means previous time step)\n\ \n`Wi` - W parameter weight matrix for input gate\n\n`Ri` - R recurrence weight\ \ matrix for input gate\n\n`Wbi` - W parameter bias vector for input gate\n\ \n`Rbi` - R parameter bias vector for input gate\n\n`WBi` - W parameter weight\ \ matrix for backward input gate\n\n`RBi` - R recurrence weight matrix for\ \ backward input gate\n\n`WBbi` - WR bias vectors for backward input gate\n\ \n`RBbi` - RR bias vectors for backward input gate\n\n`H` - Hidden state\n\ \n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation\ \ functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) \ \ - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1\ \ + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) -\ \ alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\ \n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) \ \ - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x\ \ + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x\ \ - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) \ \ - log(1 + e^x)\n\nEquations (Default: f=Tanh):\n\n - Ht = f(Xt*(Wi^T)\ \ + Ht-1*(Ri^T) + Wbi + Rbi)\nThis operator has **optional** inputs/outputs.\ \ See [the doc](IR.md) for more details about the representation of optional\ \ arguments. An empty string may be used in the place of an actual argument's\ \ name to indicate a missing argument. Trailing optional arguments (those\ \ not followed by an argument that is present) may also be simply omitted.\n" + description: ' + + Computes a one-layer simple RNN. This operator is usually supported + + via some custom implementation such as CuDNN. + + + Notations: + + + * `X` - input tensor + + * `i` - input gate + + * `t` - time step (t-1 means previous time step) + + * `Wi` - W parameter weight matrix for input gate + + * `Ri` - R recurrence weight matrix for input gate + + * `Wbi` - W parameter bias vector for input gate + + * `Rbi` - R parameter bias vector for input gate + + * `WBi` - W parameter weight matrix for backward input gate + + * `RBi` - R recurrence weight matrix for backward input gate + + * `WBbi` - WR bias vectors for backward input gate + + * `RBbi` - RR bias vectors for backward input gate + + * `H` - Hidden state + + * `num_directions` - 2 if direction == bidirectional else 1 + + + Activation functions: + + + * Relu(x) - max(0, x) + + * Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) + + * Sigmoid(x) - 1/(1 + e^{-x}) + + + NOTE: Below are optional + + + * Affine(x) - alpha*x + beta + + * LeakyRelu(x) - x if x >= 0 else alpha * x + + * ThresholdedRelu(x) - x if x >= alpha else 0 + + * ScaledTanh(x) - alpha*Tanh(beta*x) + + * HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) + + * Elu(x) - x if x >= 0 else alpha*(e^x - 1) + + * Softsign(x) - x/(1 + |x|) + + * Softplus(x) - log(1 + e^x) + + + Equations (Default: f=Tanh): + + + * Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) + + This operator has **optional** inputs/outputs. See [the doc](IR.md) for more + details about the representation of optional arguments. An empty string may + be used in the place of an actual argument''s name to indicate a missing argument. + Trailing optional arguments (those not followed by an argument that is present) + may also be simply omitted. 
+ + ' arguments: - X - W @@ -2117,50 +2305,15 @@ onnx::RandomUniformLike: - input expression_string: onnx_ops.randomuniformlike(input, dtype, high, low, seed) onnx::Range: - description: ' - - Generate a tensor containing a sequence of numbers that begin at `start` and - extends by increments of `delta` - - up to `limit` (exclusive). - - - The number of elements in the output of range is computed as below- - - - `number_of_elements = max( ceil( (limit - start) / delta ) , 0 )` - - - The pseudocode determining the contents of the output is shown below- - - - `for(int i=0; i<number_of_elements; ++i) { output[i] = start + (i * delta); }` onnx::SoftmaxCrossEntropyLoss: - description: "Loss function that measures the softmax cross entropy\nbetween\ - \ 'scores' and 'labels', and can optionally do a reduction operator.\n\nshape(scores):\ - \ (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in\ - \ case of K-dimensional loss.\nshape(labels): (N) where each value is 0 <=\ - \ labels[i] <= C-1, or (N, D1, D2,..., Dk),\n with K >= 1 in case of\ - \ K-dimensional loss.\n\nThe loss for one sample, l_i, can caculated as follows:\n\ - \ l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of\ - \ classes.\nor\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c],\ - \ if 'weights' is provided.\n\nloss is zero for the case when label-value\ - \ equals ignore_index.\n l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk]\ - \ = ignore_index\n\nwhere:\n p = Softmax(scores)\n y = Log(p)\n c\ - \ = labels[i][d1][d2]...[dk]\n\nFinally, L is optionally reduced:\nIf reduction\ - \ = 'none', the output is L with shape (N, D1, D2, ..., Dk).\nIf reduction\ - \ = 'sum', the output is scalar: Sum(L).\nIf reduction = 'mean', the output\ - \ is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W),\n\ - where tensor W is of shape (N, D1, D2, ..., Dk) and W[n][d1][d2]...[dk] =\ - \ weights[labels[i][d1][d2]...[dk]].\n" + \ optionally do a reduction operator.\n\n* shape(scores): (N, C) where C is\ \ the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in case\ \ of K-dimensional loss.\n* shape(labels): (N) where each value is 0 <= labels[i]\ \ <= C-1, or (N, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional\ \ loss.\n\nThe loss for one sample, l_i, can be calculated as follows:\n```\n\ l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.\n\ ```\nor\n```\nl[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if\ \ 'weights' is provided.\n```\n\nloss is zero for the case when label-value\ \ equals ignore_index.\n```\nl[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk]\ \ = ignore_index\n```\n\nwhere:\n```\np = Softmax(scores)\ny = Log(p)\nc =\ \ labels[i][d1][d2]...[dk]\n```\n\nFinally, L is optionally reduced:\n\n*\ \ If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk).\n\ * If reduction = 'sum', the output is scalar: Sum(L).\n* If reduction = 'mean',\ \ the output is scalar: ReduceMean(L), or if weight is provided: `ReduceSum(L)\ \ / ReduceSum(W)`,\n where tensor W is of shape `(N, D1, D2, ..., Dk)` and\ \ `W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]`.\n" arguments: - scores - labels @@ -2984,29 +3185,35 @@ onnx::Split: - split expression_string: onnx_ops.split(input, split, axis) onnx::SplitToSequence: - description: 'Split a tensor into a sequence of tensors, along the specified + description: ' + + Split a tensor into a sequence of tensors, along the specified ''axis''. + + Lengths of the parts can be specified using the optional argument ''split''. - ''axis''. Lengths of the parts can be specified using argument ''split''. + If the argument ''split'' is not specified, a default scalar value of 1 + + is used as the value of ''split''. 
''split'' must contain only positive numbers. ''split'' is either a scalar (tensor of empty shape), or a 1-D tensor. - If ''split'' is a scalar, then ''input'' will be split into equally sized - chunks(if possible). + If ''split'' is a scalar, then ''input'' will be split into chunks all of + size ''split'' - Last chunk will be smaller if the ''input'' size along the given axis ''axis'' - is not divisible + if possible. The last chunk alone may be smaller than ''split'' if the ''input'' + size - by ''split''. + along the given axis ''axis'' is not divisible by ''split''. - Otherwise, the tensor is split into ''size(split)'' chunks, with lengths of - the parts on ''axis'' + If ''split'' is a 1-dimensional tensor, the input tensor is split into ''size(split)'' + chunks, - specified in ''split''. In this scenario, the sum of entries in ''split'' - must be equal to the + with lengths of the parts on ''axis'' specified in ''split''. In this scenario, + the sum of entries - dimension size of input tensor on ''axis''. + in ''split'' must be equal to the dimension size of input tensor on ''axis''. ' arguments: @@ -3229,16 +3436,16 @@ onnx::Tile: onnx::TopK: description: "\nRetrieve the top-K largest or smallest elements along a specified\ \ axis. Given an input tensor of\nshape [a_1, a_2, ..., a_n, r] and integer\ - \ argument k, return two outputs:\n -Value tensor of shape [a_1, a_2, ...,\ - \ a_{axis-1}, k, a_{axis+1}, ... a_n]\n which contains the values of the\ - \ top k elements along the specified axis\n -Index tensor of shape [a_1,\ - \ a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which\n contains the indices\ - \ of the top k elements (original indices from the input\n tensor).\n\n\ - If \"largest\" is 1 (the default value) then the k largest elements are returned.\n\ - If \"sorted\" is 1 (the default value) then the resulting k elements will\ - \ be sorted.\nIf \"sorted\" is 0, order of returned 'Values' and 'Indices'\ + \ argument k, return two outputs:\n\n* Value tensor of shape [a_1, a_2, ...,\ + \ a_{axis-1}, k, a_{axis+1}, ... a_n]\n which contains the values of the\ + \ top k elements along the specified axis\n* Index tensor of shape [a_1, a_2,\ + \ ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which\n contains the indices of\ + \ the top k elements (original indices from the input\n tensor).\n\n* If\ + \ \"largest\" is 1 (the default value) then the k largest elements are returned.\n\ + * If \"sorted\" is 1 (the default value) then the resulting k elements will\ + \ be sorted.\n* If \"sorted\" is 0, order of returned 'Values' and 'Indices'\ \ are undefined.\n\nGiven two equivalent values, this operator uses the indices\ - \ along the axis as\n a tiebreaker. That is, the element with the lower index\ + \ along the axis as\na tiebreaker. 
That is, the element with the lower index\ \ will appear first.\n" arguments: - X @@ -3311,42 +3518,58 @@ onnx::Unique: \ of 'Y' in the input.\n\nOutputs are either sorted in ascending order or\ \ optionally in the order of the first occurrence of the values in the input.\n\ \nhttps://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html\n\ - \nExample 1:\n input_X = [2, 1, 1, 3, 4, 3]\n attribute_sorted = 0\n attribute_axis\ - \ = None\n output_Y = [2, 1, 3, 4]\n output_indices = [0, 1, 3, 4]\n output_inverse_indices\ - \ = [0, 1, 1, 2, 3, 2]\n output_counts = [1, 2, 2, 1]\n\nExample 2:\n input_X\ - \ = [[1, 3], [2, 3]]\n attribute_sorted = 1\n attribute_axis = None\n output_Y\ - \ = [1, 2, 3]\n output_indices = [0, 2, 1]\n output_inverse_indices = [0,\ - \ 2, 1, 2]\n output_counts = [1, 1, 2]\n\nExample 3:\n input_X = [[1, 0,\ - \ 0], [1, 0, 0], [2, 3, 4]]\n attribute_sorted = 1\n attribute_axis = 0\n\ - \ output_Y = [[1, 0, 0], [2, 3, 4]]\n output_indices = [0, 2]\n output_inverse_indices\ - \ = [0, 0, 1]\n output_counts = [2, 1]\n\nExample 4:\n input_x = [[[1.,\ - \ 1.], [0., 1.], [2., 1.], [0., 1.]],\n [[1., 1.], [0., 1.], [2.,\ - \ 1.], [0., 1.]]]\n attribute_sorted = 1\n attribute_axis = 1\n\n intermediate\ - \ data are presented below for better understanding:\n\n there are 4 subtensors\ - \ sliced along axis 1 of input_x (shape = (2, 4, 2)):\n A: [[1, 1], [1, 1]],\n\ - \ [[0, 1], [0, 1]],\n [[2, 1], [2, 1]],\n [[0, 1], [0, 1]].\n\n\ - \ there are 3 unique subtensors:\n [[1, 1], [1, 1]],\n [[0, 1], [0, 1]],\n\ - \ [[2, 1], [2, 1]].\n\n sorted unique subtensors:\n B: [[0, 1], [0, 1]],\n\ - \ [[1, 1], [1, 1]],\n [[2, 1], [2, 1]].\n\n output_Y is constructed\ - \ from B:\n [[[0. 1.], [1. 1.], [2. 1.]],\n [[0. 1.], [1. 1.], [2. 1.]]]\n\ - \n output_indices is to map from B to A:\n [1, 0, 2]\n\n output_inverse_indices\ - \ is to map from A to B:\n [1, 0, 2, 0]\n\n output_counts = [2 1 1]\n" + \nExample 1:\n```\ninput_X = [2, 1, 1, 3, 4, 3]\nattribute_sorted = 0\nattribute_axis\ + \ = None\noutput_Y = [2, 1, 3, 4]\noutput_indices = [0, 1, 3, 4]\noutput_inverse_indices\ + \ = [0, 1, 1, 2, 3, 2]\noutput_counts = [1, 2, 2, 1]\n```\n\nExample 2:\n\ + ```\ninput_X = [[1, 3], [2, 3]]\nattribute_sorted = 1\nattribute_axis = None\n\ + output_Y = [1, 2, 3]\noutput_indices = [0, 2, 1]\noutput_inverse_indices =\ + \ [0, 2, 1, 2]\noutput_counts = [1, 1, 2]\n```\n\nExample 3:\n```\ninput_X\ + \ = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]\nattribute_sorted = 1\nattribute_axis\ + \ = 0\noutput_Y = [[1, 0, 0], [2, 3, 4]]\noutput_indices = [0, 2]\noutput_inverse_indices\ + \ = [0, 0, 1]\noutput_counts = [2, 1]\n```\n\nExample 4:\n```\ninput_x = [[[1.,\ + \ 1.], [0., 1.], [2., 1.], [0., 1.]],\n [[1., 1.], [0., 1.], [2.,\ + \ 1.], [0., 1.]]]\nattribute_sorted = 1\nattribute_axis = 1\n```\n\nintermediate\ + \ data are presented below for better understanding:\nthere are 4 subtensors\ + \ sliced along axis 1 of input_x (shape = (2, 4, 2)):\n```\nA: [[1, 1], [1,\ + \ 1]],\n [[0, 1], [0, 1]],\n [[2, 1], [2, 1]],\n [[0, 1], [0, 1]].\n\ + ```\n\nthere are 3 unique subtensors:\n```\n[[1, 1], [1, 1]],\n[[0, 1], [0,\ + \ 1]],\n[[2, 1], [2, 1]].\n```\n\nsorted unique subtensors:\n```\nB: [[0,\ + \ 1], [0, 1]],\n [[1, 1], [1, 1]],\n [[2, 1], [2, 1]].\n```\n\noutput_Y\ + \ is constructed from B:\n```\n[[[0. 1.], [1. 1.], [2. 1.]],\n [[0. 1.], [1.\ + \ 1.], [2. 
1.]]]\n```\n\noutput_indices is to map from B to A:\n```\n[1, 0,\ + \ 2]\n```\n\noutput_inverse_indices is to map from A to B:\n```\n[1, 0, 2,\ + \ 0]\n```\n\noutput_counts:\n```\n[2, 1, 1]\n```\n" arguments: - X expression_string: onnx_ops.unique(X, axis, sorted) onnx::Unsqueeze: - description: "\nInsert single-dimensional entries to the shape of an input tensor\ - \ (`data`).\nTakes one required input `axes` - which contains a list of dimension\ - \ indices and this operator will insert a dimension of value `1` into the\ - \ corresponding index of the output tensor (`expanded`).\n\nFor example:\n\ - \ Given an input tensor (`data`) of shape [3, 4, 5], then\n Unsqueeze(data,\ - \ axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data`\ - \ but with shape [1, 3, 4, 5, 1].\n\nThe input `axes` should not contain any\ - \ duplicate entries. It is an error if it contains duplicates.\nThe rank of\ - \ the output tensor (`output_rank`) is the rank of the input tensor (`data`)\ - \ plus the number of values in `axes`.\nEach value in `axes` should be within\ - \ the (inclusive) range [-output_rank , output_rank - 1].\nThe order of values\ - \ in `axes` does not matter and can come in any order.\n\n" + description: ' + + Insert single-dimensional entries to the shape of an input tensor (`data`). + + Takes one required input `axes` - which contains a list of dimension indices + and this operator will insert a dimension of value `1` into the corresponding + index of the output tensor (`expanded`). + + + For example, given an input tensor (`data`) of shape [3, 4, 5], then + + Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same + data as `data` but with shape [1, 3, 4, 5, 1]. + + + The input `axes` should not contain any duplicate entries. It is an error + if it contains duplicates. + + The rank of the output tensor (`output_rank`) is the rank of the input tensor + (`data`) plus the number of values in `axes`. + + Each value in `axes` should be within the (inclusive) range [-output_rank + , output_rank - 1]. + + The order of values in `axes` does not matter and can come in any order. + + ' arguments: - data - axes @@ -3400,19 +3623,19 @@ pattern_matching_function: - productions - goal - retrieval - expression_string: pattern_matching_function(productions,goal,retrieval) + expression_string: actr.pattern_matching_function(productions,goal,retrieval) pattern_to_string: description: Converts a pattern dictionary to a string format. arguments: - chunk - expression_string: pattern_to_string(chunk) + expression_string: actr.pattern_to_string(chunk) retrieve_chunk: description: Retrieve a chunk from declarative memory given a pattern. arguments: - pattern - dm_chunks - types - expression_string: retrieve_chunk(pattern,dm_chunks,types) + expression_string: actr.retrieve_chunk(pattern,dm_chunks,types) sin: description: Sine function arguments: @@ -3442,14 +3665,14 @@ update_buffer: arguments: - production - buffer - expression_string: update_buffer(production,buffer) + expression_string: actr.update_buffer(production,buffer) update_goal: description: Returns a pattern to update the goal buffer with. arguments: - production - expression_string: update_goal(production) + expression_string: actr.update_goal(production) update_retrieval: description: Returns a pattern to update the retrieval buffer with. 
arguments: - production - expression_string: update_retrieval(production) + expression_string: actr.update_retrieval(production) diff --git a/docs/sphinx/source/api/Installation.md b/docs/sphinx/source/api/Installation.md index 912094300..810046571 100644 --- a/docs/sphinx/source/api/Installation.md +++ b/docs/sphinx/source/api/Installation.md @@ -123,6 +123,7 @@ make clean make html ``` + ### 7) Change directory into html folder and run the documentation offline ``` # for Windows go into build\html folder and double click on the index.html file, or: diff --git a/docs/sphinx/source/api/MDF_function_specifications.md b/docs/sphinx/source/api/MDF_function_specifications.md index 9399cd35f..db58d62bb 100644 --- a/docs/sphinx/source/api/MDF_function_specifications.md +++ b/docs/sphinx/source/api/MDF_function_specifications.md @@ -215,34 +215,34 @@ Python version: `A * (A > 0)` ## change_goal

Modifies the current goal buffer using the given pattern.

-

change_goal(pattern, curr_goal) = change_goal(pattern,curr_goal)

+

change_goal(pattern, curr_goal) = actr.change_goal(pattern,curr_goal)

-Python version: `change_goal(pattern,curr_goal)` +Python version: `actr.change_goal(pattern,curr_goal)` ## check_termination

Function used to check if no production was selected.

-

check_termination(production) = check_termination(production)

+

check_termination(production) = actr.check_termination(production)

-Python version: `check_termination(production)` +Python version: `actr.check_termination(production)` ## chunk_to_string

Converts a chunk dictionary to a string format.

-

chunk_to_string(chunk) = chunk_to_string(chunk)

+

chunk_to_string(chunk) = actr.chunk_to_string(chunk)

-Python version: `chunk_to_string(chunk)` +Python version: `actr.chunk_to_string(chunk)` ## conflict_resolution_function

ACT-R conflict resolution function. Currently selects a production at random from the already matched productions, since utility values and learning are not implemented yet.

-

conflict_resolution_function(productions) = conflict_resolution_function(productions)

+

conflict_resolution_function(productions) = actr.conflict_resolution_function(productions)

-Python version: `conflict_resolution_function(productions)` +Python version: `actr.conflict_resolution_function(productions)` @@ -265,9 +265,9 @@ Python version: `scale * numpy.cosh(variable0)` ## drift_diffusion_integrator

Integrates the drift diffusion model for a single trial using an implementation of the Euler-Maruyama method. This is a proof-of-concept implementation and is not optimized for speed.

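A minimal NumPy sketch of what such an Euler-Maruyama integrator can look like (illustrative only: the argument list mirrors the signature below, but the body and the signed-return-time convention are assumptions, not MDF's actual `ddm` implementation):

```
import numpy as np

def drift_diffusion_integrator(starting_point, non_decision_time,
                               drift_rate, threshold, noise, dt):
    """One DDM trial via Euler-Maruyama (sketch); returns a signed response time."""
    rng = np.random.default_rng()
    x = starting_point
    t = non_decision_time
    while abs(x) < threshold:
        # dX = drift_rate * dt + noise * sqrt(dt) * N(0, 1)
        x += drift_rate * dt + noise * np.sqrt(dt) * rng.standard_normal()
        t += dt
    # The sign of the boundary that was hit encodes the decision.
    return np.sign(x) * t

# e.g. drift_diffusion_integrator(0.0, 0.3, 1.0, 1.0, 1.0, 1e-3)
```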
-

drift_diffusion_integrator(starting_point, non_decision_time, drift_rate, threshold, noise, dt) = drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt)

+

drift_diffusion_integrator(starting_point, non_decision_time, drift_rate, threshold, noise, dt) = ddm.drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt)

-Python version: `drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt)` +Python version: `ddm.drift_diffusion_integrator(starting_point,non_decision_time,drift_rate,threshold,noise,dt)` @@ -297,9 +297,9 @@ Python version: `1/(1 + numpy.exp(-1*gain*(variable0 + bias) + offset))` ## match_production

Returns True if the production's left hand side matches the given context and adds the matching bindings to the production.

-

match_production(production, context) = match_production(production,context)

+

match_production(production, context) = actr.match_production(production,context)

-Python version: `match_production(production,context)` +Python version: `actr.match_production(production,context)` @@ -440,11 +440,11 @@ Python version: `onnx_ops.anumpy.tanh(input)` subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape will be following: ``` - output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1) + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) ``` or ``` - output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1) + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) ``` if ceil_mode is enabled @@ -454,12 +454,12 @@ Python version: `onnx_ops.anumpy.tanh(input)` `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following: ``` - VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i]) + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) ``` And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`: ``` - pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i] + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] ``` The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).

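The floor/ceil output-shape formulas above are easy to sanity-check numerically. A small helper under stated assumptions (explicit pads, with `pads_sum[i]` the sum of the pads along axis `i`; the function name is hypothetical):

```
import math

def pool_output_shape(input_shape, kernel_shape, strides, pads_sum,
                      dilations=None, ceil_mode=False):
    # Implements the AveragePool/MaxPool output_spatial_shape formulas above.
    dilations = dilations or [1] * len(input_shape)
    rnd = math.ceil if ceil_mode else math.floor
    return [
        rnd((input_shape[i] + pads_sum[i]
             - ((kernel_shape[i] - 1) * dilations[i] + 1)) / strides[i] + 1)
        for i in range(len(input_shape))
    ]

# A 2x2 kernel with stride 2 and no padding on a 5x5 input:
# pool_output_shape([5, 5], [2, 2], [2, 2], [0, 0]) -> [2, 2]
```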
@@ -480,8 +480,8 @@ statistics in inference mode (training_mode=False, default), and the running statistics in training mode (training_mode=True). There are multiple cases for the number of outputs, which we list below: -Output case #1: Y, running_mean, running_var (training_mode=True) -Output case #2: Y (training_mode=False) +* Output case #1: Y, running_mean, running_var (training_mode=True) +* Output case #2: Y (training_mode=False) When training_mode=False, extra outputs are invalid. The outputs are updated as follows when training_mode=True: @@ -490,17 +490,15 @@ running_mean = input_mean * momentum + current_mean * (1 - momentum) running_var = input_var * momentum + current_var * (1 - momentum) Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B - +``` where: - +``` current_mean = ReduceMean(X, axis=all_except_channel_index) current_var = ReduceVar(X, axis=all_except_channel_index) - -Notice that ReduceVar refers to the population variance, and it equals to -sum(sqrd(x_i - x_avg)) / N -where N is the population size (this formula does not use sample size N - 1). - ``` +Notice that `ReduceVar` refers to the population variance, and it equals +`sum(sqrd(x_i - x_avg)) / N` +where `N` is the population size (this formula does not use sample size `N - 1`). The computation of ReduceMean and ReduceVar uses float to avoid overflow for float16 inputs.

Bitwise shift operator performs element-wise operation. For each input element, if the - attribute "direction" is "RIGHT", this operator moves its binary representation toward - the right side so that the input value is effectively decreased. If the attribute "direction" - is "LEFT", bits of binary representation moves toward the left side, which results the - increase of its actual value. The input X is the tensor to be shifted and another input - Y specifies the amounts of shifting. For example, if "direction" is "Right", X is [1, 4], - and S is [1, 1], the corresponding output Z would be [0, 2]. If "direction" is "LEFT" with - X=[1, 2] and S=[1, 2], the corresponding output Y would be [2, 8]. - - Because this operator supports Numpy-style broadcasting, X's and Y's shapes are - not necessarily identical. +attribute "direction" is "RIGHT", this operator moves its binary representation toward +the right side so that the input value is effectively decreased. If the attribute "direction" +is "LEFT", bits of the binary representation move toward the left side, which results in an +increase of its actual value. The input X is the tensor to be shifted and another input +Y specifies the amounts of shifting. For example, if "direction" is "Right", X is [1, 4], +and S is [1, 1], the corresponding output Z would be [0, 2]. If "direction" is "LEFT" with +X=[1, 2] and S=[1, 2], the corresponding output Y would be [2, 8]. + +Because this operator supports Numpy-style broadcasting, X's and Y's shapes are +not necessarily identical. This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md).

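The worked values above can be reproduced with NumPy's shift ufuncs (a quick sanity check, not the `onnx_ops` implementation itself):

```
import numpy as np

X, S = np.array([1, 4]), np.array([1, 1])
print(np.right_shift(X, S))   # direction="RIGHT" -> [0 2]

X, S = np.array([1, 2]), np.array([1, 2])
print(np.left_shift(X, S))    # direction="LEFT"  -> [2 8]
```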
Python version: `onnx_ops.bitshift(X, Y, direction)` @@ -563,7 +561,7 @@ in the 'DataType' enum field in the TensorProto message. Casting from string tensor in plain (e.g., "3.14" and "1000") and scientific numeric representations (e.g., "1e-5" and "1E8") to float types is supported. For example, converting string "100.5" to an integer may -result 100. There are some string literals reserved for special floating-point values; +yield the result 100. There are some string literals reserved for special floating-point values; "+INF" (and "INF"), "-INF", and "NaN" are positive infinity, negative infinity, and not-a-number, respectively. Any string which can exactly match "+INF" in a case-insensitive way would be mapped to positive infinite. Similarly, this case-insensitive rule is applied to "INF" and "NaN". When casting from numeric tensors @@ -585,7 +583,7 @@ In more detail, the conversion among numerical types should follow these rules: * Casting from fixed point to: * floating point: +/- infinity if OOR. (+ infinity in the case of uint) * fixed point: when OOR, discard higher bits and reinterpret (with respect to two's complement representation for -signed types). For example, 200 (int16) -> -56 (int8). + signed types). For example, 200 (int16) -> -56 (int8). * bool: zero to False; nonzero to True. * Casting from bool to: * floating point: `{1.0, 0.0}`. @@ -614,7 +612,7 @@ Python version: `onnx_ops.castlike(input, target_type)`

Ceil takes one input data (Tensor) and produces one output data (Tensor) where the ceil is, y = ceil(x), is applied to -the tensor elementwise. +the tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is returned.

Python version: `onnx_ops.ceil(X)` @@ -803,26 +801,22 @@ and width dimensions. By default, `mode` = `DCR`. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. The output y is computed from the input x as below: +``` b, c, h, w = x.shape - tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w]) - tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2]) - y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize]) - +``` In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the following order: column, row, and the depth. The output y is computed from the input x as below: +``` b, c, h, w = x.shape - tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w]) - tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3]) - y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize]) - +```

Python version: `onnx_ops.depthtospace(input, blocksize, mode)` @@ -833,9 +827,9 @@ Python version: `onnx_ops.depthtospace(input, blocksize, mode)` ## DequantizeLinear

The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor. -The dequantization formula is y = (x - x_zero_point) * x_scale. 'x_scale' and 'x_zero_point' must have same shape, and can be either a scalar +The dequantization formula is `y = (x - x_zero_point) * x_scale`. `x_scale` and `x_zero_point` must have same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. -'x_zero_point' and 'x' must have same type. 'x' and 'y' must have same shape. In the case of dequantizing int32, +`x_zero_point` and `x` must have same type. `x` and `y` must have same shape. In the case of dequantizing int32, there's no zero point (zero point is supposed to be 0).

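In NumPy the formula is a single broadcasted expression; a sketch for the per-tensor case (per-axis quantization would additionally reshape `x_scale` and `x_zero_point` along the chosen axis):

```
import numpy as np

def dequantize_linear(x, x_scale, x_zero_point=0):
    # y = (x - x_zero_point) * x_scale, computed in float32
    return (x.astype(np.float32) - np.float32(x_zero_point)) * np.float32(x_scale)

# uint8 example with scale 0.5 and zero point 128:
print(dequantize_linear(np.array([128, 130], dtype=np.uint8), 0.5, 128))  # [0. 1.]
```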
@@ -899,24 +893,29 @@ A Function to fuse calculation for Scale, Zero Point and FP32->8Bit convertion o Outputs Scale, ZeroPoint and Quantized Input for a given FP32 Input. Scale is calculated as: ``` - y_scale = (max(x) - min(x))/(qmax - qmin) - * where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8 - * data range is adjusted to include 0. +y_scale = (max(x) - min(x))/(qmax - qmin) ``` + +* where qmax and qmin are max and min values for quantization range, i.e. [0, 255] in case of uint8 +* data range is adjusted to include 0. + Zero point is calculated as: ``` intermediate_zero_point = qmin - min(x)/y_scale y_zero_point = cast(round(saturate(itermediate_zero_point))) +``` + * where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8 * for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported. * rounding to nearest ties to even. -``` + Data quantization formula is: ``` y = saturate (round (x / y_scale) + y_zero_point) +``` + * for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported. * rounding to nearest ties to even. -```

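Putting the three formulas together for the uint8 case gives a compact sketch (illustrative only; `np.rint` supplies the round-half-to-even behaviour described above, and the degenerate all-zero input is not handled):

```
import numpy as np

def dynamic_quantize_linear(x):
    qmin, qmax = 0, 255
    # Adjust the data range so that it always includes 0.
    rmin, rmax = min(x.min(), 0.0), max(x.max(), 0.0)
    y_scale = (rmax - rmin) / (qmax - qmin)
    # Zero point: saturate, round (ties to even), then cast.
    y_zero_point = np.uint8(np.clip(np.rint(qmin - rmin / y_scale), qmin, qmax))
    # Data quantization: y = saturate(round(x / y_scale) + y_zero_point).
    y = np.clip(np.rint(x / y_scale) + y_zero_point, qmin, qmax).astype(np.uint8)
    return y, np.float32(y_scale), y_zero_point
```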
Python version: `onnx_ops.dynamicquantizelinear(x)` @@ -926,9 +925,11 @@ Python version: `onnx_ops.dynamicquantizelinear(x)` ## Einsum

-An einsum of the form ```term1, term2 -> output-term``` produces an output tensor using the following equation +An einsum of the form `term1, term2 -> output-term` produces an output tensor using the following equation -```output[output-term] = reduce-sum( input1[term1] * input2[term] )``` +``` +output[output-term] = reduce-sum( input1[term1] * input2[term] ) +``` where the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2) that do not occur in the output-term. @@ -1051,7 +1052,7 @@ Python version: `onnx_ops.flatten(input, axis)`

Floor takes one input data (Tensor) and produces one output data (Tensor) where the floor is, y = floor(x), is applied to -the tensor elementwise. +the tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is returned.

Python version: `onnx_ops.floor(X)` @@ -1066,73 +1067,47 @@ implementation such as CuDNN. Notations: -`X` - input tensor - -`z` - update gate - -`r` - reset gate - -`h` - hidden gate - -`t` - time step (t-1 means previous time step) - -`W[zrh]` - W parameter weight matrix for update, reset, and hidden gates - -`R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates - -`Wb[zrh]` - W bias vectors for update, reset, and hidden gates - -`Rb[zrh]` - R bias vectors for update, reset, and hidden gates - -`WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates - -`RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates - -`WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates - -`RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates - -`H` - Hidden state - -`num_directions` - 2 if direction == bidirectional else 1 +* `X` - input tensor +* `z` - update gate +* `r` - reset gate +* `h` - hidden gate +* `t` - time step (t-1 means previous time step) +* `W[zrh]` - W parameter weight matrix for update, reset, and hidden gates +* `R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates +* `Wb[zrh]` - W bias vectors for update, reset, and hidden gates +* `Rb[zrh]` - R bias vectors for update, reset, and hidden gates +* `WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates +* `RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates +* `WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates +* `RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates +* `H` - Hidden state +* `num_directions` - 2 if direction == bidirectional else 1 Activation functions: - Relu(x) - max(0, x) - - Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - - Sigmoid(x) - 1/(1 + e^{-x}) - - (NOTE: Below are optional) +* Relu(x) - max(0, x) +* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) +* Sigmoid(x) - 1/(1 + e^{-x}) - Affine(x) - alpha*x + beta +NOTE: + Below are optional - LeakyRelu(x) - x if x >= 0 else alpha * x - - ThresholdedRelu(x) - x if x >= alpha else 0 - - ScaledTanh(x) - alpha*Tanh(beta*x) - - HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) - - Elu(x) - x if x >= 0 else alpha*(e^x - 1) - - Softsign(x) - x/(1 + |x|) - - Softplus(x) - log(1 + e^x) +* Affine(x) - alpha * x + beta +* LeakyRelu(x) - x if x >= 0 else alpha * x +* ThresholdedRelu(x) - x if x >= alpha else 0 +* ScaledTanh(x) - alpha * Tanh(beta * x) +* HardSigmoid(x) - min(max(alpha * x + beta, 0), 1) +* Elu(x) - x if x >= 0 else alpha * (e^x - 1) +* Softsign(x) - x/(1 + |x|) +* Softplus(x) - log(1 + e^x) Equations (Default: f=Sigmoid, g=Tanh): - - zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) - - - rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) - - - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0 - - - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0 - - - Ht = (1 - zt) (.) ht + zt (.) Ht-1 +* zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) +* rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) +* ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0 +* ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0 +* Ht = (1 - zt) (.) ht + zt (.) Ht-1 This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. 
An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

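Read as code, the equations above amount to a single-direction forward step like the following (a sketch with the default `f=Sigmoid`, `g=Tanh`; the per-gate unpacking of `W`, `R`, `Wb`, `Rb` is an illustrative layout):

```
import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def gru_step(Xt, H_prev, W, R, Wb, Rb, linear_before_reset=0):
    Wz, Wr, Wh = W    # update, reset, hidden weights, each (hidden, input)
    Rz, Rr, Rh = R    # recurrence weights, each (hidden, hidden)
    Wbz, Wbr, Wbh = Wb
    Rbz, Rbr, Rbh = Rb
    zt = sigmoid(Xt @ Wz.T + H_prev @ Rz.T + Wbz + Rbz)
    rt = sigmoid(Xt @ Wr.T + H_prev @ Rr.T + Wbr + Rbr)
    if linear_before_reset:
        ht = np.tanh(Xt @ Wh.T + rt * (H_prev @ Rh.T + Rbh) + Wbh)
    else:
        ht = np.tanh(Xt @ Wh.T + (rt * H_prev) @ Rh.T + Rbh + Wbh)
    return (1.0 - zt) * ht + zt * H_prev
```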
@@ -1147,56 +1122,49 @@ Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather entries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates them in an output tensor of rank q + (r - 1). -axis = 0 : - -Let -k = indices[i_{0}, ..., i_{q-1}] -Then -output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}] +If `axis = 0`, let `k = indices[i_{0}, ..., i_{q-1}]` +then `output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]`: ``` - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - indices = [ - [0, 1], - [1, 2], - ] - output = [ - [ - [1.0, 1.2], - [2.3, 3.4], - ], - [ - [2.3, 3.4], - [4.5, 5.7], - ], - ] +data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], +] +indices = [ + [0, 1], + [1, 2], +] +output = [ + [ + [1.0, 1.2], + [2.3, 3.4], + ], + [ + [2.3, 3.4], + [4.5, 5.7], + ], +] ``` -axis = 1 : -Let -k = indices[i_{0}, ..., i_{q-1}] -Then -output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}] +If `axis = 1`, let `k = indices[i_{0}, ..., i_{q-1}]` +then `output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]`: ``` - data = [ - [1.0, 1.2, 1.9], - [2.3, 3.4, 3.9], - [4.5, 5.7, 5.9], - ] - indices = [ - [0, 2], - ] - axis = 1, - output = [ - [[1.0, 1.9]], - [[2.3, 3.9]], - [[4.5, 5.9]], - ] +data = [ + [1.0, 1.2, 1.9], + [2.3, 3.4, 3.9], + [4.5, 5.7, 5.9], +] +indices = [ + [0, 2], +] +axis = 1, +output = [ + [[1.0, 1.9]], + [[2.3, 3.9]], + [[4.5, 5.9]], +] ```

@@ -1219,45 +1187,45 @@ Its output shape is the same as the shape of `indices` and consists of one value For instance, in the 3-D case (r = 3), the output produced is determined by the following equations: ``` - out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0, - out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1, - out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2, +out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0, +out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1, +out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2, ``` This operator is also the inverse of ScatterElements. It is similar to Torch's gather operation. Example 1: ``` - data = [ - [1, 2], - [3, 4], - ] - indices = [ - [0, 0], - [1, 0], - ] - axis = 1 - output = [ - [1, 1], - [4, 3], - ] +data = [ + [1, 2], + [3, 4], +] +indices = [ + [0, 0], + [1, 0], +] +axis = 1 +output = [ + [1, 1], + [4, 3], +] ``` Example 2: ``` - data = [ - [1, 2, 3], - [4, 5, 6], - [7, 8, 9], - ] - indices = [ - [1, 2, 0], - [2, 0, 0], - ] - axis = 0 - output = [ - [4, 8, 3], - [7, 2, 3], - ] +data = [ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], +] +indices = [ + [1, 2, 0], + [2, 0, 0], +] +axis = 0 +output = [ + [4, 8, 3], + [7, 2, 3], +] ```

@@ -1370,9 +1338,8 @@ Python version: `onnx_ops.gathernd(data, indices, batch_dims)`

General Matrix multiplication: https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 -A' = transpose(A) if transA else A - -B' = transpose(B) if transB else B +* A' = transpose(A) if transA else A +* B' = transpose(B) if transB else B Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), @@ -1529,14 +1496,14 @@ Python version: `onnx_ops.isnan(X)`

Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). It normalizes over local input regions. -The local region is defined across the channels. For an element X[n, c, d1, ..., dk] in a tensor -of shape (N x C x D1 x D2, ..., Dk), its region is -{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}. +The local region is defined across the channels. For an element `X[n, c, d1, ..., dk]` in a tensor +of shape `(N x C x D1 x D2, ..., Dk)`, its region is +`{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}`. -square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2), -where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)). +`square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2)`, +where `max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))`. -Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta +`Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta`

Python version: `onnx_ops.lrn(X, alpha, beta, bias, size)` @@ -1551,81 +1518,50 @@ custom implementation such as CuDNN. Notations: -`X` - input tensor - -`i` - input gate - -`o` - output gate - -`f` - forget gate - -`c` - cell gate - -`t` - time step (t-1 means previous time step) - -`W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates - -`R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates - -`Wb[iofc]` - W bias vectors for input, output, forget, and cell gates - -`Rb[iofc]` - R bias vectors for input, output, forget, and cell gates - -`P[iof]` - P peephole weight vector for input, output, and forget gates - -`WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates - -`RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates - -`WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates - -`RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates - -`PB[iof]` - P peephole weight vector for backward input, output, and forget gates - -`H` - Hidden state - -`num_directions` - 2 if direction == bidirectional else 1 +* `X` - input tensor +* `i` - input gate +* `o` - output gate +* `f` - forget gate +* `c` - cell gate +* `t` - time step (t-1 means previous time step) +* `W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates +* `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates +* `Wb[iofc]` - W bias vectors for input, output, forget, and cell gates +* `Rb[iofc]` - R bias vectors for input, output, forget, and cell gates +* `P[iof]` - P peephole weight vector for input, output, and forget gates +* `WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates +* `RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates +* `WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates +* `RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates +* `PB[iof]` - P peephole weight vector for backward input, output, and forget gates +* `H` - Hidden state +* `num_directions` - 2 if direction == bidirectional else 1 Activation functions: - Relu(x) - max(0, x) - - Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - - Sigmoid(x) - 1/(1 + e^{-x}) - - (NOTE: Below are optional) +* Relu(x) - max(0, x) +* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) +* Sigmoid(x) - 1/(1 + e^{-x}) - Affine(x) - alpha*x + beta +NOTE: Below are optional - LeakyRelu(x) - x if x >= 0 else alpha * x - - ThresholdedRelu(x) - x if x >= alpha else 0 - - ScaledTanh(x) - alpha*Tanh(beta*x) - - HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) - - Elu(x) - x if x >= 0 else alpha*(e^x - 1) - - Softsign(x) - x/(1 + |x|) - - Softplus(x) - log(1 + e^x) +* Affine(x) - alpha*x + beta +* LeakyRelu(x) - x if x >= 0 else alpha * x +* ThresholdedRelu(x) - x if x >= alpha else 0 +* ScaledTanh(x) - alpha*Tanh(beta*x) +* HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) +* Elu(x) - x if x >= 0 else alpha*(e^x - 1) +* Softsign(x) - x/(1 + |x|) +* Softplus(x) - log(1 + e^x) Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): - - it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) - - - ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) - - - ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) - - - Ct = ft (.) Ct-1 + it (.) ct - - - ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) - - - Ht = ot (.) h(Ct) +* it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) 
Ct-1 + Wbi + Rbi) +* ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) +* ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) +* Ct = ft (.) Ct-1 + it (.) ct +* ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) +* Ht = ot (.) h(Ct) This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.

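The same equations as a single-direction forward step in NumPy (a sketch with the default activations and no peepholes, i.e. `Pi = Pf = Po = 0`; the per-gate unpacking is an illustrative layout):

```
import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def lstm_step(Xt, H_prev, C_prev, W, R, Wb, Rb):
    Wi, Wo, Wf, Wc = W    # gate weights in [i, o, f, c] order, each (hidden, input)
    Ri, Ro, Rf, Rc = R    # recurrence weights, each (hidden, hidden)
    Wbi, Wbo, Wbf, Wbc = Wb
    Rbi, Rbo, Rbf, Rbc = Rb
    it = sigmoid(Xt @ Wi.T + H_prev @ Ri.T + Wbi + Rbi)
    ft = sigmoid(Xt @ Wf.T + H_prev @ Rf.T + Wbf + Rbf)
    ct = np.tanh(Xt @ Wc.T + H_prev @ Rc.T + Wbc + Rbc)
    Ct = ft * C_prev + it * ct                  # (.) is the elementwise product
    ot = sigmoid(Xt @ Wo.T + H_prev @ Ro.T + Wbo + Rbo)
    Ht = ot * np.tanh(Ct)
    return Ht, Ct
```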
@@ -1768,11 +1704,7 @@ Python version: `onnx_ops.max(data_0)` ``` output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) ``` - if ceil_mode is enabled - - ``` - * pad_shape[i] is sum of pads along axis i - ``` + if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following: ``` @@ -1844,7 +1776,7 @@ Python version: `onnx_ops.mean(data_0)` ## MeanVarianceNormalization

A MeanVarianceNormalization Function: Perform mean variance normalization - on the input tensor X using formula:
``` (X-EX)/sqrt(E(X-EX)^2) ``` + on the input tensor X using formula: `(X-EX)/sqrt(E(X-EX)^2)`

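Equivalently, in NumPy (a sketch; the `axes` tuple plays the role of the operator's `axes` argument):

```
import numpy as np

def mean_variance_normalization(X, axes=(0, 2, 3)):
    # (X - E[X]) / sqrt(E[(X - E[X])^2])
    mean = X.mean(axis=axes, keepdims=True)
    return (X - mean) / np.sqrt(((X - mean) ** 2).mean(axis=axes, keepdims=True))
```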
Python version: `onnx_ops.meanvariancenormalization(X, axes)` @@ -1867,16 +1799,16 @@ Python version: `onnx_ops.min(data_0)` ## Mod

Performs element-wise binary modulus (with Numpy-style broadcasting support). - The sign of the remainder is the same as that of the Divisor. + The sign of the remainder is the same as that of the Divisor. - Mod operator can also behave like C fmod() or numpy.fmod. In this case, the sign of the remainder however, will be the same as the Dividend - (in contrast to integer mod). To force a behavior like numpy.fmod() an 'fmod' Attribute is provided. - This attribute is set to 0 by default causing the behavior to be like integer mod. - Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod(). + Mod operator can also behave like C fmod() or numpy.fmod. In this case, the sign of the remainder however, will be the same as the Dividend + (in contrast to integer mod). To force a behavior like numpy.fmod() an 'fmod' Attribute is provided. + This attribute is set to 0 by default causing the behavior to be like integer mod. + Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod(). - If the input type is floating point, then `fmod` attribute must be set to 1. + If the input type is floating point, then `fmod` attribute must be set to 1. - In case of dividend being zero, the results will be platform dependent. + In case of dividend being zero, the results will be platform dependent. This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md).

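The integer-mod versus fmod distinction is exactly NumPy's `mod` versus `fmod` (a quick illustration):

```
import numpy as np

a, b = np.array([-4, 7]), np.array([3, -3])
print(np.mod(a, b))    # fmod=0: sign follows the divisor  -> [ 2 -2]
print(np.fmod(a, b))   # fmod=1: sign follows the dividend -> [-1  1]
```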
@@ -1932,85 +1864,100 @@ The operator's "target" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes) or it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x dk samples. The loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as: - loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k]. +``` +loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k]. +``` When an optional "weight" is provided, the sample loss is calculated as: - loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c]. +``` +loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c]. +``` loss is zero for the case when target-value equals ignore_index. - loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index +``` +loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index +``` If "reduction" attribute is set to "none", the operator's output will be the above loss with shape (N, d1, d2, ..., dk). If "reduction" attribute is set to "mean" (the default attribute value), the output loss is (weight) averaged: - mean(loss), if "weight" is not provided, +``` +mean(loss), if "weight" is not provided, +``` or if weight is provided, - sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples. +``` +sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]), for all samples. +``` -If "reduction" attribute is set to "sum", the output is a scalar: - sum(loss). +If "reduction" attribute is set to "sum", the output is a scalar: `sum(loss)`. See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss. Example 1: - // negative log likelihood loss, "none" reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - - loss = np.zeros((N, d1)) - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] - - // print(loss) - // [[-3. -2.] - // [-0. -2.]] +``` +// negative log likelihood loss, "none" reduction +N, C, d1 = 2, 3, 2 +input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] +target = [[2, 1], [0, 2]] + +loss = np.zeros((N, d1)) +for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] + +// print(loss) +// [[-3. -2.] +// [-0. 
-2.]] +``` Example 2: - // weighted negative log likelihood loss, sum reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - weight = [0.2, 0.3, 0.1] - loss = np.zeros((N, d1)) - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] * weight[c] - - loss = np.sum(loss) - // print(loss) - // -1.1 +``` +// weighted negative log likelihood loss, sum reduction +N, C, d1 = 2, 3, 2 +input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] +target = [[2, 1], [0, 2]] +weight = [0.2, 0.3, 0.1] +loss = np.zeros((N, d1)) +for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] * weight[c] + +loss = np.sum(loss) +// print(loss) +// -1.1 +``` Example 3: - // weighted negative log likelihood loss, mean reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - weight = [0.2, 0.3, 0.1] - loss = np.zeros((N, d1)) - weight_total = 0 - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] * weight[c] - weight_total = weight_total + weight[c] - - loss = np.sum(loss) / weight_total - // print(loss) - // -1.57 +``` +// weighted negative log likelihood loss, mean reduction +N, C, d1 = 2, 3, 2 +input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] +target = [[2, 1], [0, 2]] +weight = [0.2, 0.3, 0.1] +loss = np.zeros((N, d1)) +weight_total = 0 +for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] * weight[c] + weight_total = weight_total + weight[c] + +loss = np.sum(loss) / weight_total +// print(loss) +// -1.57 +```

Python version: `onnx_ops.negativeloglikelihoodloss(input, target, weight, ignore_index, reduction)` @@ -2281,7 +2228,7 @@ The linear quantization operator. It consumes a high precision tensor, a scale, The scale factor and zero point must have the same shape, and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization. The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8. -For (x / y_scale), it's rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type. +For (x / y_scale), it rounds to the nearest value, with ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have the same type.
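A minimal numpy sketch of this formula (illustrative only; it assumes uint8 saturation, and the actual `onnx_ops.quantizelinear` implementation may differ):

```python
import numpy as np

def quantize_linear(x, y_scale, y_zero_point):
    # np.round rounds half to even, matching the rounding rule above.
    y = np.round(x / y_scale) + y_zero_point
    return np.clip(y, 0, 255).astype(np.uint8)  # saturate for uint8

print(quantize_linear(np.array([0.0, 2.0, 500.0]), y_scale=2.0, y_zero_point=128))
# [128 129 255]
```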

Python version: `onnx_ops.quantizelinear(x, y_scale, y_zero_point, axis)` @@ -2296,61 +2243,40 @@ via some custom implementation such as CuDNN. Notations: -`X` - input tensor - -`i` - input gate - -`t` - time step (t-1 means previous time step) - -`Wi` - W parameter weight matrix for input gate - -`Ri` - R recurrence weight matrix for input gate - -`Wbi` - W parameter bias vector for input gate - -`Rbi` - R parameter bias vector for input gate - -`WBi` - W parameter weight matrix for backward input gate - -`RBi` - R recurrence weight matrix for backward input gate - -`WBbi` - WR bias vectors for backward input gate - -`RBbi` - RR bias vectors for backward input gate - -`H` - Hidden state - -`num_directions` - 2 if direction == bidirectional else 1 +* `X` - input tensor +* `i` - input gate +* `t` - time step (t-1 means previous time step) +* `Wi` - W parameter weight matrix for input gate +* `Ri` - R recurrence weight matrix for input gate +* `Wbi` - W parameter bias vector for input gate +* `Rbi` - R parameter bias vector for input gate +* `WBi` - W parameter weight matrix for backward input gate +* `RBi` - R recurrence weight matrix for backward input gate +* `WBbi` - WR bias vectors for backward input gate +* `RBbi` - RR bias vectors for backward input gate +* `H` - Hidden state +* `num_directions` - 2 if direction == bidirectional else 1 Activation functions: - Relu(x) - max(0, x) - - Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - - Sigmoid(x) - 1/(1 + e^{-x}) - - (NOTE: Below are optional) - - Affine(x) - alpha*x + beta - - LeakyRelu(x) - x if x >= 0 else alpha * x - - ThresholdedRelu(x) - x if x >= alpha else 0 +* Relu(x) - max(0, x) +* Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) +* Sigmoid(x) - 1/(1 + e^{-x}) - ScaledTanh(x) - alpha*Tanh(beta*x) +NOTE: Below are optional - HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) - - Elu(x) - x if x >= 0 else alpha*(e^x - 1) - - Softsign(x) - x/(1 + |x|) - - Softplus(x) - log(1 + e^x) +* Affine(x) - alpha*x + beta +* LeakyRelu(x) - x if x >= 0 else alpha * x +* ThresholdedRelu(x) - x if x >= alpha else 0 +* ScaledTanh(x) - alpha*Tanh(beta*x) +* HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) +* Elu(x) - x if x >= 0 else alpha*(e^x - 1) +* Softsign(x) - x/(1 + |x|) +* Softplus(x) - log(1 + e^x) Equations (Default: f=Tanh): - - Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) +* Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.
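For concreteness, a hedged numpy sketch of one step of the default equation above, Ht = Tanh(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi); the shapes and names here are illustrative and are not the `onnx_ops.rnn` API:

```python
import numpy as np

def rnn_step(Xt, H_prev, Wi, Ri, Wbi, Rbi):
    # Default activation f = Tanh.
    return np.tanh(Xt @ Wi.T + H_prev @ Ri.T + Wbi + Rbi)

rng = np.random.default_rng(0)
batch, input_size, hidden_size = 2, 3, 4
Xt = rng.standard_normal((batch, input_size))
H = np.zeros((batch, hidden_size))
Wi = rng.standard_normal((hidden_size, input_size))
Ri = rng.standard_normal((hidden_size, hidden_size))
Wbi = Rbi = np.zeros(hidden_size)
print(rnn_step(Xt, H, Wi, Ri, Wbi, Rbi).shape)  # (2, 4)
```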

@@ -2427,28 +2353,33 @@ Python version: `onnx_ops.randomuniformlike(input, dtype, high, low, seed)` Generate a tensor containing a sequence of numbers that begins at `start` and extends by increments of `delta` up to `limit` (exclusive). -The number of elements in the output of range is computed as below- - -`number_of_elements = max( ceil( (limit - start) / delta ) , 0 )` +The number of elements in the output of range is computed as below: + +``` +number_of_elements = max( ceil( (limit - start) / delta ) , 0 ) +``` -The pseudocode determining the contents of the output is shown below- - -`for(int i=0; i<number_of_elements; ++i) { output[i] = start + (i * delta); }` +The pseudocode determining the contents of the output is shown below: + +``` +for(int i=0; i<number_of_elements; ++i) { +  output[i] = start + (i * delta); +} +```
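Equivalently in numpy (a rough sketch; `np.arange` has the same exclusive-limit semantics):

```python
import numpy as np

start, limit, delta = 1.0, 10.0, 3.0
n = max(int(np.ceil((limit - start) / delta)), 0)  # number_of_elements
print(n, start + delta * np.arange(n))  # 3 [1. 4. 7.]
```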

Python version: `onnx_ops.range(start, limit, delta)` @@ -2470,9 +2401,10 @@ Python version: `onnx_ops.reciprocal(X)` ## ReduceL1

-Computes the L1 norm of the input tensor's element along the provided axes. The resulting +Computes the L1 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.
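A hedged numpy sketch of this behavior (illustrative, not the `onnx_ops.reducel1` source; the other Reduce* operators below follow the same pattern with a different inner reduction):

```python
import numpy as np

def reduce_l1(data, axes=None, keepdims=1):
    # Sum of absolute values along the given axes, optionally keeping dims.
    return np.sum(np.abs(data), axis=axes, keepdims=bool(keepdims))

x = np.array([[1.0, -2.0], [-3.0, 4.0]])
print(reduce_l1(x, axes=(1,), keepdims=1))  # [[3.] [7.]]
```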

@@ -2484,9 +2416,10 @@ Python version: `onnx_ops.reducel1(data, axes, keepdims)` ## ReduceL2

-Computes the L2 norm of the input tensor's element along the provided axes. The resulting +Computes the L2 norm of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2498,9 +2431,10 @@ Python version: `onnx_ops.reducel2(data, axes, keepdims)` ## ReduceLogSum

-Computes the log sum of the input tensor's element along the provided axes. The resulting +Computes the log sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2512,9 +2446,10 @@ Python version: `onnx_ops.reducelogsum(data, axes, keepdims)` ## ReduceLogSumExp

-Computes the log sum exponent of the input tensor's element along the provided axes. The resulting +Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2526,9 +2461,10 @@ Python version: `onnx_ops.reducelogsumexp(data, axes, keepdims)` ## ReduceMax

-Computes the max of the input tensor's element along the provided axes. The resulting +Computes the max of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2540,9 +2476,10 @@ Python version: `onnx_ops.reducemax(data, axes, keepdims)` ## ReduceMean

-Computes the mean of the input tensor's element along the provided axes. The resulting +Computes the mean of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2554,9 +2491,10 @@ Python version: `onnx_ops.reducemean(data, axes, keepdims)` ## ReduceMin

-Computes the min of the input tensor's element along the provided axes. The resulting +Computes the min of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2568,9 +2506,10 @@ Python version: `onnx_ops.reducemin(data, axes, keepdims)` ## ReduceProd

-Computes the product of the input tensor's element along the provided axes. The resulting +Computes the product of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2582,9 +2521,10 @@ Python version: `onnx_ops.reduceprod(data, axes, keepdims)` ## ReduceSum

-Computes the sum of the input tensor's element along the provided axes. The resulting +Computes the sum of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2596,9 +2536,10 @@ Python version: `onnx_ops.reducesum(data, axes, keepdims, noop_with_empty_axes)` ## ReduceSumSquare

-Computes the sum square of the input tensor's element along the provided axes. The resulting +Computes the sum square of the input tensor's elements along the provided axes. The resulting tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then -the resulting tensor has the reduced dimension pruned. +the resulting tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True.

@@ -2721,6 +2662,7 @@ Python version: `onnx_ops.roialign(X, rois, batch_indices, mode, output_height, Round takes one input Tensor and rounds the values, element-wise, meaning it finds the nearest integer for each value. In case of halves, the rule is to round them to the nearest even integer. +If input x is integral, +0, -0, NaN, or infinite, x itself is returned. The output tensor has the same shape and type as the input. Examples: @@ -3023,23 +2965,32 @@ negative axis). Thus, specifying any end value > r is equivalent to specifying a value of r, and specifying any start value < -r is equivalent to specifying a start value of 0. -For example: +Examples: + +``` Input tensor with shape: [2, 3, 4] No attributes specified. Output: [2, 3, 4] +``` +``` Input tensor with shape: [2, 3, 4] start: -1 Output: [4] +``` +``` Input tensor with shape: [2, 3, 4] end: -1 Output: [2, 3] +``` +``` Input tensor with shape: [2, 3, 4] start: 1 end: 2 Output: [3] +```
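These Shape examples can be reproduced with plain Python slicing over the shape tuple (an illustrative sketch; the argument order here is for readability and is not the `onnx_ops.shape` signature):

```python
import numpy as np

def shape_op(data, start=0, end=None):
    # Shape with optional start/end is just a slice of the shape tuple.
    return np.array(data.shape[start:end], dtype=np.int64)

x = np.zeros((2, 3, 4))
print(shape_op(x))            # [2 3 4]
print(shape_op(x, start=-1))  # [4]
print(shape_op(x, end=-1))    # [2 3]
print(shape_op(x, 1, 2))      # [3]
```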

Python version: `onnx_ops.shape(data, end, start)` @@ -3149,27 +3100,34 @@ For slicing to the end of a dimension with unknown size, it is recommended to pa in `INT_MAX` when slicing forward and 'INT_MIN' when slicing backward. Example 1: - data = [ - [1, 2, 3, 4], - [5, 6, 7, 8], - ] - axes = [0, 1] - starts = [1, 0] - ends = [2, 3] - steps = [1, 2] - result = [ - [5, 7], - ] + +``` +data = [ + [1, 2, 3, 4], + [5, 6, 7, 8], +] +axes = [0, 1] +starts = [1, 0] +ends = [2, 3] +steps = [1, 2] +result = [ + [5, 7], +] +``` + Example 2: - data = [ - [1, 2, 3, 4], - [5, 6, 7, 8], - ] - starts = [0, 1] - ends = [-1, 1000] - result = [ - [2, 3, 4], - ] + +``` +data = [ + [1, 2, 3, 4], + [5, 6, 7, 8], +] +starts = [0, 1] +ends = [-1, 1000] +result = [ + [2, 3, 4], +] +```
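Example 1 above maps directly onto numpy start:end:step indexing (a quick check, not the operator implementation):

```python
import numpy as np

data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
# axes=[0, 1], starts=[1, 0], ends=[2, 3], steps=[1, 2]
print(data[1:2, 0:3:2])  # [[5 7]]
```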

Python version: `onnx_ops.slice(data, starts, ends, axes, steps)` @@ -3202,29 +3160,38 @@ If the input is N-D tensor with shape (N, C, D1, D2, ..., Dk), the loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L. After L is available, this operator can optionally apply a reduction operation. -shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk), - with K >= 1 in case of K-dimensional loss. -shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk), - with K >= 1 in case of K-dimensional loss. +* shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk), + with K >= 1 in case of K-dimensional loss. +* shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk), + with K >= 1 in case of K-dimensional loss. The loss for one sample, l_i, can be calculated as follows: - l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes. +``` +l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes. +``` or - l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided. +``` +l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided. +``` loss is zero for the case when label-value equals ignore_index. - l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index +``` +l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index +``` where: - p = Softmax(scores) - y = Log(p) - c = labels[i][d1][d2]...[dk] +``` +p = Softmax(scores) +y = Log(p) +c = labels[i][d1][d2]...[dk] +``` Finally, L is optionally reduced: -If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk). -If reduction = 'sum', the output is scalar: Sum(L). -If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W), -where tensor W is of shape (N, D1, D2, ..., Dk) and W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]. + +* If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk). +* If reduction = 'sum', the output is scalar: Sum(L). +* If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: `ReduceSum(L) / ReduceSum(W)`, + where tensor W is of shape `(N, D1, D2, ..., Dk)` and `W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]`.
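The per-sample formulas above amount to the following numpy sketch for the 2-D (N, C) case with 'mean' reduction and no weights (illustrative only; see `onnx_ops.softmaxcrossentropyloss` for the full operator):

```python
import numpy as np

def softmax_cross_entropy_loss(scores, labels):
    # p = Softmax(scores); y = Log(p); l[i] = -y[i][labels[i]]
    shifted = scores - scores.max(axis=1, keepdims=True)  # numerical stability
    log_p = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    return -log_p[np.arange(len(labels)), labels].mean()

scores = np.array([[2.0, 1.0, 0.1], [0.5, 2.5, 0.3]])
labels = np.array([0, 1])
print(softmax_cross_entropy_loss(scores, labels))  # ~0.3185
```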

Python version: `onnx_ops.softmaxcrossentropyloss(scores, labels, weights, ignore_index, reduction)` @@ -3277,16 +3244,19 @@ Python version: `onnx_ops.split(input, split, axis)` ## SplitToSequence -Split a tensor into a sequence of tensors, along the specified -'axis'. Lengths of the parts can be specified using argument 'split'. +Split a tensor into a sequence of tensors, along the specified 'axis'. +Lengths of the parts can be specified using the optional argument 'split'. +If the argument 'split' is not specified, a default scalar value of 1 +is used as the value of 'split'. 'split' must contain only positive numbers. 'split' is either a scalar (tensor of empty shape), or a 1-D tensor. -If 'split' is a scalar, then 'input' will be split into equally sized chunks(if possible). -Last chunk will be smaller if the 'input' size along the given axis 'axis' is not divisible -by 'split'. -Otherwise, the tensor is split into 'size(split)' chunks, with lengths of the parts on 'axis' -specified in 'split'. In this scenario, the sum of entries in 'split' must be equal to the -dimension size of input tensor on 'axis'. +If 'split' is a scalar, then 'input' will be split into chunks all of size 'split' +if possible. The last chunk alone may be smaller than 'split' if the 'input' size +along the given axis 'axis' is not divisible by 'split'. +If 'split' is a 1-dimensional tensor, the input tensor is split into 'size(split)' chunks, +with lengths of the parts on 'axis' specified in 'split'. In this scenario, the sum of entries +in 'split' must be equal to the dimension size of input tensor on 'axis'.
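For the scalar-'split' case, a minimal numpy sketch (illustrative, not the `onnx_ops.splittosequence` source):

```python
import numpy as np

x = np.arange(10)
split = 4  # scalar split: chunks of size 4; the last chunk may be smaller
chunks = [x[i:i + split] for i in range(0, len(x), split)]
print([c.tolist() for c in chunks])  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
```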

Python version: `onnx_ops.splittosequence(input, split, axis, keepdims)` @@ -3446,18 +3416,19 @@ Python version: `onnx_ops.tile(input, repeats)`

Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of shape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs: - -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] - which contains the values of the top k elements along the specified axis - -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which - contains the indices of the top k elements (original indices from the input - tensor). -If "largest" is 1 (the default value) then the k largest elements are returned. -If "sorted" is 1 (the default value) then the resulting k elements will be sorted. -If "sorted" is 0, order of returned 'Values' and 'Indices' are undefined. +* Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] + which contains the values of the top k elements along the specified axis +* Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which + contains the indices of the top k elements (original indices from the input + tensor). + +* If "largest" is 1 (the default value) then the k largest elements are returned. +* If "sorted" is 1 (the default value) then the resulting k elements will be sorted. +* If "sorted" is 0, order of returned 'Values' and 'Indices' are undefined. Given two equivalent values, this operator uses the indices along the axis as - a tiebreaker. That is, the element with the lower index will appear first. +a tiebreaker. That is, the element with the lower index will appear first.
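A rough numpy sketch of TopK with largest=1 and sorted=1 (illustrative; a stable argsort reproduces the lower-index tiebreak noted above):

```python
import numpy as np

def topk(X, k, axis=-1):
    # Negate so an ascending stable sort yields the largest values first.
    idx = np.argsort(-X, axis=axis, kind="stable")
    idx = np.take(idx, range(k), axis=axis)
    return np.take_along_axis(X, idx, axis=axis), idx

X = np.array([[3, 1, 3, 2]])
values, indices = topk(X, 2)
print(values, indices)  # [[3 3]] [[0 2]]
```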

Python version: `onnx_ops.topk(X, K, axis, largest, sorted)` @@ -3514,67 +3485,89 @@ Outputs are either sorted in ascending order or optionally in the order of the f https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html Example 1: - input_X = [2, 1, 1, 3, 4, 3] - attribute_sorted = 0 - attribute_axis = None - output_Y = [2, 1, 3, 4] - output_indices = [0, 1, 3, 4] - output_inverse_indices = [0, 1, 1, 2, 3, 2] - output_counts = [1, 2, 2, 1] +``` +input_X = [2, 1, 1, 3, 4, 3] +attribute_sorted = 0 +attribute_axis = None +output_Y = [2, 1, 3, 4] +output_indices = [0, 1, 3, 4] +output_inverse_indices = [0, 1, 1, 2, 3, 2] +output_counts = [1, 2, 2, 1] +``` Example 2: - input_X = [[1, 3], [2, 3]] - attribute_sorted = 1 - attribute_axis = None - output_Y = [1, 2, 3] - output_indices = [0, 2, 1] - output_inverse_indices = [0, 2, 1, 2] - output_counts = [1, 1, 2] +``` +input_X = [[1, 3], [2, 3]] +attribute_sorted = 1 +attribute_axis = None +output_Y = [1, 2, 3] +output_indices = [0, 2, 1] +output_inverse_indices = [0, 2, 1, 2] +output_counts = [1, 1, 2] +``` Example 3: - input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]] - attribute_sorted = 1 - attribute_axis = 0 - output_Y = [[1, 0, 0], [2, 3, 4]] - output_indices = [0, 2] - output_inverse_indices = [0, 0, 1] - output_counts = [2, 1] +``` +input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]] +attribute_sorted = 1 +attribute_axis = 0 +output_Y = [[1, 0, 0], [2, 3, 4]] +output_indices = [0, 2] +output_inverse_indices = [0, 0, 1] +output_counts = [2, 1] +``` Example 4: - input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]], - [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]] - attribute_sorted = 1 - attribute_axis = 1 - - intermediate data are presented below for better understanding: +``` +input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]], + [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]] +attribute_sorted = 1 +attribute_axis = 1 +``` - there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)): - A: [[1, 1], [1, 1]], - [[0, 1], [0, 1]], - [[2, 1], [2, 1]], - [[0, 1], [0, 1]]. +intermediate data are presented below for better understanding: +there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)): +``` +A: [[1, 1], [1, 1]], + [[0, 1], [0, 1]], + [[2, 1], [2, 1]], + [[0, 1], [0, 1]]. +``` - there are 3 unique subtensors: - [[1, 1], [1, 1]], - [[0, 1], [0, 1]], - [[2, 1], [2, 1]]. +there are 3 unique subtensors: +``` +[[1, 1], [1, 1]], +[[0, 1], [0, 1]], +[[2, 1], [2, 1]]. +``` - sorted unique subtensors: - B: [[0, 1], [0, 1]], - [[1, 1], [1, 1]], - [[2, 1], [2, 1]]. +sorted unique subtensors: +``` +B: [[0, 1], [0, 1]], + [[1, 1], [1, 1]], + [[2, 1], [2, 1]]. +``` - output_Y is constructed from B: - [[[0. 1.], [1. 1.], [2. 1.]], - [[0. 1.], [1. 1.], [2. 1.]]] +output_Y is constructed from B: +``` +[[[0. 1.], [1. 1.], [2. 1.]], + [[0. 1.], [1. 1.], [2. 1.]]] +``` - output_indices is to map from B to A: - [1, 0, 2] +output_indices is to map from B to A: +``` +[1, 0, 2] +``` - output_inverse_indices is to map from A to B: - [1, 0, 2, 0] +output_inverse_indices is to map from A to B: +``` +[1, 0, 2, 0] +``` - output_counts = [2 1 1] +output_counts: +``` +[2, 1, 1] +```
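For axis=None these outputs match numpy's np.unique directly; for instance, Example 2 above (a quick check, not the operator implementation):

```python
import numpy as np

X = np.array([[1, 3], [2, 3]])
Y, indices, inverse, counts = np.unique(
    X, return_index=True, return_inverse=True, return_counts=True
)
print(Y, indices, inverse, counts)
# [1 2 3] [0 2 1] [0 2 1 2] [1 1 2]
```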

Python version: `onnx_ops.unique(X, axis, sorted)` @@ -3587,15 +3580,13 @@ Python version: `onnx_ops.unique(X, axis, sorted)` Insert single-dimensional entries to the shape of an input tensor (`data`). Takes one required input `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`). -For example: - Given an input tensor (`data`) of shape [3, 4, 5], then - Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1]. +For example, given an input tensor (`data`) of shape [3, 4, 5], then +Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing the same data as `data` but with shape [1, 3, 4, 5, 1]. The input `axes` should not contain any duplicate entries. It is an error if it contains duplicates. The rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`. Each value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1]. The order of values in `axes` does not matter and can come in any order. -
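The example above can be checked with np.expand_dims, which accepts a tuple of axes in recent numpy versions (an illustrative sketch):

```python
import numpy as np

data = np.zeros((3, 4, 5))
expanded = np.expand_dims(data, axis=(0, 4))
print(expanded.shape)  # (1, 3, 4, 5, 1)
```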

Python version: `onnx_ops.unsqueeze(data, axes)` @@ -3644,25 +3635,25 @@ Python version: `onnx_ops.xor(A, B)` ## pattern_matching_function

Returns the productions that match the given goal and retrieval buffers.

-pattern_matching_function(productions, goal, retrieval) = pattern_matching_function(productions,goal,retrieval)
+pattern_matching_function(productions, goal, retrieval) = actr.pattern_matching_function(productions,goal,retrieval)

-Python version: `pattern_matching_function(productions,goal,retrieval)` +Python version: `actr.pattern_matching_function(productions,goal,retrieval)` ## pattern_to_string

Converts a pattern dictionary to a string format.

-pattern_to_string(chunk) = pattern_to_string(chunk)
+pattern_to_string(chunk) = actr.pattern_to_string(chunk)

-Python version: `pattern_to_string(chunk)` +Python version: `actr.pattern_to_string(chunk)` ## retrieve_chunk

Retrieve a chunk from declarative memory given a pattern.

-retrieve_chunk(pattern, dm_chunks, types) = retrieve_chunk(pattern,dm_chunks,types)
+retrieve_chunk(pattern, dm_chunks, types) = actr.retrieve_chunk(pattern,dm_chunks,types)

-Python version: `retrieve_chunk(pattern,dm_chunks,types)` +Python version: `actr.retrieve_chunk(pattern,dm_chunks,types)` @@ -3700,22 +3691,22 @@ Python version: `scale * numpy.tanh(variable0)` ## update_buffer

Returns a pattern to update the given buffer with.

-update_buffer(production, buffer) = update_buffer(production,buffer)
+update_buffer(production, buffer) = actr.update_buffer(production,buffer)

-Python version: `update_buffer(production,buffer)` +Python version: `actr.update_buffer(production,buffer)` ## update_goal

Returns a pattern to update the goal buffer with.

-update_goal(production) = update_goal(production)
+update_goal(production) = actr.update_goal(production)

-Python version: `update_goal(production)` +Python version: `actr.update_goal(production)` ## update_retrieval

Returns a pattern to update the retrieval buffer with.

-update_retrieval(production) = update_retrieval(production)
+update_retrieval(production) = actr.update_retrieval(production)

-Python version: `update_retrieval(production)` +Python version: `actr.update_retrieval(production)` diff --git a/docs/sphinx/source/api/export_format/ONNX/ab.png b/docs/sphinx/source/api/export_format/ONNX/ab.png index d45792450..9571df211 100644 Binary files a/docs/sphinx/source/api/export_format/ONNX/ab.png and b/docs/sphinx/source/api/export_format/ONNX/ab.png differ diff --git a/docs/sphinx/source/api/export_format/ONNX/simple_ab.py b/docs/sphinx/source/api/export_format/ONNX/simple_ab.py index 105dbac50..6d7d5111b 100644 --- a/docs/sphinx/source/api/export_format/ONNX/simple_ab.py +++ b/docs/sphinx/source/api/export_format/ONNX/simple_ab.py @@ -100,7 +100,7 @@ def main(): for t in test_values: print("===================\nEvaluating MDF model with input: %s" % t) eg.evaluate(initializer={"input": t}) - print("Output: %s" % eg.enodes["Mul_3"].evaluable_outputs["_4"].curr_value) + print("Output: %s" % eg.enodes["/B/Mul"].evaluable_outputs["_4"].curr_value) if __name__ == "__main__": diff --git a/docs/sphinx/source/api/export_format/PyTorch/inception.png b/docs/sphinx/source/api/export_format/PyTorch/inception.png index ed468c27f..a58290a49 100644 Binary files a/docs/sphinx/source/api/export_format/PyTorch/inception.png and b/docs/sphinx/source/api/export_format/PyTorch/inception.png differ diff --git a/examples/ACT-R/addition.json b/examples/ACT-R/addition.json index 9b124245b..8423d002a 100644 --- a/examples/ACT-R/addition.json +++ b/examples/ACT-R/addition.json @@ -1,7 +1,7 @@ { "addition": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "addition_graph": { "nodes": { diff --git a/examples/ACT-R/addition.yaml b/examples/ACT-R/addition.yaml index 6eae1c08c..df10d2369 100644 --- a/examples/ACT-R/addition.yaml +++ b/examples/ACT-R/addition.yaml @@ -1,6 +1,6 @@ addition: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: addition_graph: nodes: diff --git a/examples/ACT-R/count.json b/examples/ACT-R/count.json index 93c45d6a7..7c13a54db 100644 --- a/examples/ACT-R/count.json +++ b/examples/ACT-R/count.json @@ -1,7 +1,7 @@ { "count": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "count_graph": { "nodes": { diff --git a/examples/ACT-R/count.yaml b/examples/ACT-R/count.yaml index 4b6d87a73..8c94ea648 100644 --- a/examples/ACT-R/count.yaml +++ b/examples/ACT-R/count.yaml @@ -1,6 +1,6 @@ count: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: count_graph: nodes: diff --git a/examples/MDF/ABCD.json b/examples/MDF/ABCD.json index acd55c3ad..c553bde56 100644 --- a/examples/MDF/ABCD.json +++ b/examples/MDF/ABCD.json @@ -1,7 +1,7 @@ { "ABCD": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "abcd_example": { "nodes": { diff --git a/examples/MDF/ABCD.yaml b/examples/MDF/ABCD.yaml index 662f50f6d..c87d7becb 100644 --- a/examples/MDF/ABCD.yaml +++ b/examples/MDF/ABCD.yaml @@ -1,6 +1,6 @@ ABCD: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: abcd_example: nodes: diff --git a/examples/MDF/Arrays.json b/examples/MDF/Arrays.json index 
16139e280..fa108749a 100644 --- a/examples/MDF/Arrays.json +++ b/examples/MDF/Arrays.json @@ -1,7 +1,7 @@ { "Arrays": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "array_example": { "nodes": { diff --git a/examples/MDF/Arrays.yaml b/examples/MDF/Arrays.yaml index ca0d2dd16..3825866f7 100644 --- a/examples/MDF/Arrays.yaml +++ b/examples/MDF/Arrays.yaml @@ -1,6 +1,6 @@ Arrays: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: array_example: nodes: diff --git a/examples/MDF/ParametersFunctions.json b/examples/MDF/ParametersFunctions.json index c09abea96..b97f9a78a 100644 --- a/examples/MDF/ParametersFunctions.json +++ b/examples/MDF/ParametersFunctions.json @@ -1,7 +1,7 @@ { "ParametersFunctions": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "params_funcs_example": { "nodes": { diff --git a/examples/MDF/ParametersFunctions.yaml b/examples/MDF/ParametersFunctions.yaml index 7f506ee09..1e1155119 100644 --- a/examples/MDF/ParametersFunctions.yaml +++ b/examples/MDF/ParametersFunctions.yaml @@ -1,6 +1,6 @@ ParametersFunctions: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: params_funcs_example: nodes: diff --git a/examples/MDF/RNN/IAF_net.json b/examples/MDF/RNN/IAF_net.json index a52b8a385..ae47caa4c 100644 --- a/examples/MDF/RNN/IAF_net.json +++ b/examples/MDF/RNN/IAF_net.json @@ -1,7 +1,7 @@ { "IAF_net": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "iaf_example": { "nodes": { diff --git a/examples/MDF/RNN/IAF_net.yaml b/examples/MDF/RNN/IAF_net.yaml index 47c8df4f0..b6af0d912 100644 --- a/examples/MDF/RNN/IAF_net.yaml +++ b/examples/MDF/RNN/IAF_net.yaml @@ -1,6 +1,6 @@ IAF_net: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: iaf_example: nodes: diff --git a/examples/MDF/RNN/IAF_net2.json b/examples/MDF/RNN/IAF_net2.json index f55be7fdb..7bbc1f251 100644 --- a/examples/MDF/RNN/IAF_net2.json +++ b/examples/MDF/RNN/IAF_net2.json @@ -1,7 +1,7 @@ { "IAF_net2": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "iaf_example": { "nodes": { diff --git a/examples/MDF/RNN/IAF_net2.yaml b/examples/MDF/RNN/IAF_net2.yaml index 6d112759c..769202ec1 100644 --- a/examples/MDF/RNN/IAF_net2.yaml +++ b/examples/MDF/RNN/IAF_net2.yaml @@ -1,6 +1,6 @@ IAF_net2: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: iaf_example: nodes: diff --git a/examples/MDF/RNN/IAFs.json b/examples/MDF/RNN/IAFs.json index 1649868de..6868698f1 100644 --- a/examples/MDF/RNN/IAFs.json +++ b/examples/MDF/RNN/IAFs.json @@ -1,7 +1,7 @@ { "IAFs": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "iaf_example": { "nodes": { diff --git a/examples/MDF/RNN/IAFs.yaml b/examples/MDF/RNN/IAFs.yaml index cfead5409..2d7cee1e9 100644 --- a/examples/MDF/RNN/IAFs.yaml +++ 
b/examples/MDF/RNN/IAFs.yaml @@ -1,6 +1,6 @@ IAFs: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: iaf_example: nodes: diff --git a/examples/MDF/RNN/RNNs.json b/examples/MDF/RNN/RNNs.json index 6938697be..46822464c 100644 --- a/examples/MDF/RNN/RNNs.json +++ b/examples/MDF/RNN/RNNs.json @@ -1,7 +1,7 @@ { "RNNs": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "rnn_example": { "nodes": { diff --git a/examples/MDF/RNN/RNNs.yaml b/examples/MDF/RNN/RNNs.yaml index dbab3029b..502355c5d 100644 --- a/examples/MDF/RNN/RNNs.yaml +++ b/examples/MDF/RNN/RNNs.yaml @@ -1,6 +1,6 @@ RNNs: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: rnn_example: nodes: diff --git a/examples/MDF/Simple.bson b/examples/MDF/Simple.bson index 63a790722..e65b3ee9b 100644 Binary files a/examples/MDF/Simple.bson and b/examples/MDF/Simple.bson differ diff --git a/examples/MDF/Simple.json b/examples/MDF/Simple.json index 3fff8a0df..fdede522b 100644 --- a/examples/MDF/Simple.json +++ b/examples/MDF/Simple.json @@ -1,7 +1,7 @@ { "Simple": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "simple_example": { "nodes": { diff --git a/examples/MDF/Simple.yaml b/examples/MDF/Simple.yaml index cd3e8197d..eb7cf6739 100644 --- a/examples/MDF/Simple.yaml +++ b/examples/MDF/Simple.yaml @@ -1,6 +1,6 @@ Simple: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: simple_example: nodes: diff --git a/examples/MDF/States.json b/examples/MDF/States.json index 8e6a221e8..fafde25a5 100644 --- a/examples/MDF/States.json +++ b/examples/MDF/States.json @@ -1,7 +1,7 @@ { "States": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "state_example": { "nodes": { diff --git a/examples/MDF/States.yaml b/examples/MDF/States.yaml index 1b70017ef..cc5261612 100644 --- a/examples/MDF/States.yaml +++ b/examples/MDF/States.yaml @@ -1,6 +1,6 @@ States: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: state_example: nodes: diff --git a/examples/MDF/abc_conditions.json b/examples/MDF/abc_conditions.json index 742499cd3..4628ce1c3 100644 --- a/examples/MDF/abc_conditions.json +++ b/examples/MDF/abc_conditions.json @@ -1,7 +1,7 @@ { "abc_conditions": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "abc_conditions_example": { "nodes": { diff --git a/examples/MDF/abc_conditions.yaml b/examples/MDF/abc_conditions.yaml index 7f193a17a..c6c1abb75 100644 --- a/examples/MDF/abc_conditions.yaml +++ b/examples/MDF/abc_conditions.yaml @@ -1,6 +1,6 @@ abc_conditions: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: abc_conditions_example: nodes: diff --git a/examples/MDF/conditions/Composite_mdf_condition.json b/examples/MDF/conditions/Composite_mdf_condition.json index 6bcfef920..5c7ba98e6 100644 --- 
a/examples/MDF/conditions/Composite_mdf_condition.json +++ b/examples/MDF/conditions/Composite_mdf_condition.json @@ -1,7 +1,7 @@ { "Composite_mdf_condition": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "Composite_mdf_condition_example": { "nodes": { diff --git a/examples/MDF/conditions/Composite_mdf_condition.yaml b/examples/MDF/conditions/Composite_mdf_condition.yaml index f1e66afea..ef8c5ea91 100644 --- a/examples/MDF/conditions/Composite_mdf_condition.yaml +++ b/examples/MDF/conditions/Composite_mdf_condition.yaml @@ -1,6 +1,6 @@ Composite_mdf_condition: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: Composite_mdf_condition_example: nodes: diff --git a/examples/MDF/conditions/everyncalls_condition.json b/examples/MDF/conditions/everyncalls_condition.json index faba8de62..33c8ea4ab 100644 --- a/examples/MDF/conditions/everyncalls_condition.json +++ b/examples/MDF/conditions/everyncalls_condition.json @@ -1,7 +1,7 @@ { "everyncalls_condition": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "everyncalls_example": { "nodes": { diff --git a/examples/MDF/conditions/everyncalls_condition.yaml b/examples/MDF/conditions/everyncalls_condition.yaml index e7b29dcd5..6efec5916 100644 --- a/examples/MDF/conditions/everyncalls_condition.yaml +++ b/examples/MDF/conditions/everyncalls_condition.yaml @@ -1,6 +1,6 @@ everyncalls_condition: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: everyncalls_example: nodes: diff --git a/examples/MDF/conditions/threshold_condition.json b/examples/MDF/conditions/threshold_condition.json index 5ccec3bf8..191b9afda 100644 --- a/examples/MDF/conditions/threshold_condition.json +++ b/examples/MDF/conditions/threshold_condition.json @@ -1,7 +1,7 @@ { "threshold_condition": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "threshold_example": { "nodes": { diff --git a/examples/MDF/conditions/threshold_condition.yaml b/examples/MDF/conditions/threshold_condition.yaml index 3ec37aa77..87f3dcdce 100644 --- a/examples/MDF/conditions/threshold_condition.yaml +++ b/examples/MDF/conditions/threshold_condition.yaml @@ -1,6 +1,6 @@ threshold_condition: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: threshold_example: nodes: diff --git a/examples/MDF/conditions/timeinterval_condition.json b/examples/MDF/conditions/timeinterval_condition.json index c2c09b117..9962404d6 100644 --- a/examples/MDF/conditions/timeinterval_condition.json +++ b/examples/MDF/conditions/timeinterval_condition.json @@ -1,7 +1,7 @@ { "timeinterval_condition": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "timeinterval_example": { "nodes": { diff --git a/examples/MDF/conditions/timeinterval_condition.yaml b/examples/MDF/conditions/timeinterval_condition.yaml index 8916befbd..0c08e5c25 100644 --- a/examples/MDF/conditions/timeinterval_condition.yaml +++ b/examples/MDF/conditions/timeinterval_condition.yaml @@ -1,6 +1,6 @@ 
timeinterval_condition: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: timeinterval_example: nodes: diff --git a/examples/NeuroML/LEMS_SimABCD.xml b/examples/NeuroML/LEMS_SimABCD.xml index 90571f198..8d6f20e1b 100644 --- a/examples/NeuroML/LEMS_SimABCD.xml +++ b/examples/NeuroML/LEMS_SimABCD.xml @@ -2,7 +2,7 @@ @@ -19,7 +19,6 @@ - diff --git a/examples/NeuroML/LEMS_SimFN.xml b/examples/NeuroML/LEMS_SimFN.xml index 5463b1959..30878a37b 100644 --- a/examples/NeuroML/LEMS_SimFN.xml +++ b/examples/NeuroML/LEMS_SimFN.xml @@ -2,7 +2,7 @@ @@ -19,7 +19,6 @@ - diff --git a/examples/NeuroML/LEMS_SimIzhikevichTest.xml b/examples/NeuroML/LEMS_SimIzhikevichTest.xml index 80e907601..a38e06e3e 100644 --- a/examples/NeuroML/LEMS_SimIzhikevichTest.xml +++ b/examples/NeuroML/LEMS_SimIzhikevichTest.xml @@ -2,7 +2,7 @@ @@ -18,7 +18,6 @@ - diff --git a/examples/NeuroML/PyNN/HH.mdf.yaml b/examples/NeuroML/PyNN/HH.mdf.yaml index d3d7bc25d..54cfe1f1d 100644 --- a/examples/NeuroML/PyNN/HH.mdf.yaml +++ b/examples/NeuroML/PyNN/HH.mdf.yaml @@ -135,10 +135,10 @@ HH: \ bindsnet_node=None)] is defined in None and in Lems is:\ \ Component, id: testcell, type: HH_cond_exp,\n parameters:\ \ {'cm': '0.2', 'i_offset': 0.05, 'tau_syn_E': '0.2', 'tau_syn_I':\ - \ '2.', 'v_init': '-65.', 'v_offset': '-63.', 'e_rev_E': '0.',\ - \ 'e_rev_I': '-80.', 'e_rev_K': '-90.', 'e_rev_Na': '50.',\ - \ 'e_rev_leak': '-65.', 'g_leak': '0.01', 'gbar_K': '6.',\ - \ 'gbar_Na': '20.'}\n parent: None\n" + \ '2.0', 'v_init': '-65.0', 'v_offset': '-63.0', 'e_rev_E':\ + \ '0.0', 'e_rev_I': '-80.0', 'e_rev_K': '-90.0', 'e_rev_Na':\ + \ '50.0', 'e_rev_leak': '-65.0', 'g_leak': '0.01', 'gbar_K':\ + \ '6.0', 'gbar_Na': '20.0'}\n parent: None\n" InputList_stim: parameters: delay: diff --git a/examples/NeuroML/PyNN/HH.net.nml b/examples/NeuroML/PyNN/HH.net.nml index 46da98e61..202428b18 100644 --- a/examples/NeuroML/PyNN/HH.net.nml +++ b/examples/NeuroML/PyNN/HH.net.nml @@ -5,8 +5,8 @@ NeuroMLlite parameters: input_amp = 0 - - + + Example: HH diff --git a/examples/NeuroML/PyNN/InputWeights.mdf.yaml b/examples/NeuroML/PyNN/InputWeights.mdf.yaml index c0aa1a66c..4f48280a4 100644 --- a/examples/NeuroML/PyNN/InputWeights.mdf.yaml +++ b/examples/NeuroML/PyNN/InputWeights.mdf.yaml @@ -185,9 +185,9 @@ InputWeights: \ neuroml2_cell=None, pynn_cell='IF_curr_alpha', arbor_cell=None,\ \ bindsnet_node=None)] is defined in None and in Lems is:\ \ Component, id: testcell, type: IF_curr_alpha,\n parameters:\ - \ {'cm': '1.', 'i_offset': 0.0, 'tau_syn_E': '0.5', 'tau_syn_I':\ - \ '0.5', 'v_init': '-65.', 'tau_m': '20.', 'tau_refrac': 5,\ - \ 'v_reset': '-65.', 'v_rest': '-65.', 'v_thresh': '-50.'}\n\ + \ {'cm': '1.0', 'i_offset': 0.0, 'tau_syn_E': '0.5', 'tau_syn_I':\ + \ '0.5', 'v_init': '-65.0', 'tau_m': '20.0', 'tau_refrac':\ + \ 5, 'v_reset': '-65.0', 'v_rest': '-65.0', 'v_thresh': '-50.0'}\n\ \ parent: None\n" InputList_stim: parameters: diff --git a/examples/NeuroML/PyNN/InputWeights.net.nml b/examples/NeuroML/PyNN/InputWeights.net.nml index 52581ad5f..f0bd24f1d 100644 --- a/examples/NeuroML/PyNN/InputWeights.net.nml +++ b/examples/NeuroML/PyNN/InputWeights.net.nml @@ -5,8 +5,8 @@ NeuroMLlite parameters: input_amp = 0.99 - - + + Example: InputWeights diff --git a/examples/NeuroML/PyNN/Net1.mdf.yaml b/examples/NeuroML/PyNN/Net1.mdf.yaml index f99d318f4..b48cc28f6 100644 --- a/examples/NeuroML/PyNN/Net1.mdf.yaml +++ b/examples/NeuroML/PyNN/Net1.mdf.yaml @@ -153,9 +153,9 @@ Net1: \ 
neuroml2_cell=None, pynn_cell='IF_curr_alpha', arbor_cell=None,\ \ bindsnet_node=None)] is defined in None and in Lems is:\ \ Component, id: testcell, type: IF_curr_alpha,\n parameters:\ - \ {'cm': '1.', 'i_offset': 0.0, 'tau_syn_E': '20.', 'tau_syn_I':\ - \ '0.5', 'v_init': '-65.', 'tau_m': '20.', 'tau_refrac': 5,\ - \ 'v_reset': '-65.', 'v_rest': '-65.', 'v_thresh': '-50.'}\n\ + \ {'cm': '1.0', 'i_offset': 0.0, 'tau_syn_E': '20.0', 'tau_syn_I':\ + \ '0.5', 'v_init': '-65.0', 'tau_m': '20.0', 'tau_refrac':\ + \ 5, 'v_reset': '-65.0', 'v_rest': '-65.0', 'v_thresh': '-50.0'}\n\ \ parent: None\n" pop1: metadata: @@ -322,9 +322,9 @@ Net1: \ neuroml2_cell=None, pynn_cell='IF_curr_alpha', arbor_cell=None,\ \ bindsnet_node=None)] is defined in None and in Lems is:\ \ Component, id: testcell, type: IF_curr_alpha,\n parameters:\ - \ {'cm': '1.', 'i_offset': 0.0, 'tau_syn_E': '20.', 'tau_syn_I':\ - \ '0.5', 'v_init': '-65.', 'tau_m': '20.', 'tau_refrac': 5,\ - \ 'v_reset': '-65.', 'v_rest': '-65.', 'v_thresh': '-50.'}\n\ + \ {'cm': '1.0', 'i_offset': 0.0, 'tau_syn_E': '20.0', 'tau_syn_I':\ + \ '0.5', 'v_init': '-65.0', 'tau_m': '20.0', 'tau_refrac':\ + \ 5, 'v_reset': '-65.0', 'v_rest': '-65.0', 'v_thresh': '-50.0'}\n\ \ parent: None\n" proj0_ampaSyn: parameters: diff --git a/examples/NeuroML/PyNN/Net1.net.nml b/examples/NeuroML/PyNN/Net1.net.nml index cd0236f1a..0b173eb58 100644 --- a/examples/NeuroML/PyNN/Net1.net.nml +++ b/examples/NeuroML/PyNN/Net1.net.nml @@ -5,8 +5,8 @@ NeuroMLlite parameters: input_amp = 0.99 - - + + Example: Net1 diff --git a/examples/NeuroML/PyNN/OneCell.mdf.yaml b/examples/NeuroML/PyNN/OneCell.mdf.yaml index 2d0fe4ac2..ffdc977a1 100644 --- a/examples/NeuroML/PyNN/OneCell.mdf.yaml +++ b/examples/NeuroML/PyNN/OneCell.mdf.yaml @@ -137,9 +137,9 @@ OneCell: \ neuroml2_cell=None, pynn_cell='IF_curr_alpha', arbor_cell=None,\ \ bindsnet_node=None)] is defined in None and in Lems is:\ \ Component, id: testcell, type: IF_curr_alpha,\n parameters:\ - \ {'cm': '1.', 'i_offset': 0.0, 'tau_syn_E': '0.5', 'tau_syn_I':\ - \ '0.5', 'v_init': '-65.', 'tau_m': '20.', 'tau_refrac': 5,\ - \ 'v_reset': '-65.', 'v_rest': '-65.', 'v_thresh': '-50.'}\n\ + \ {'cm': '1.0', 'i_offset': 0.0, 'tau_syn_E': '0.5', 'tau_syn_I':\ + \ '0.5', 'v_init': '-65.0', 'tau_m': '20.0', 'tau_refrac':\ + \ 5, 'v_reset': '-65.0', 'v_rest': '-65.0', 'v_thresh': '-50.0'}\n\ \ parent: None\n" InputList_stim: parameters: diff --git a/examples/NeuroML/PyNN/OneCell.net.nml b/examples/NeuroML/PyNN/OneCell.net.nml index 6d5b4c08e..3e8945063 100644 --- a/examples/NeuroML/PyNN/OneCell.net.nml +++ b/examples/NeuroML/PyNN/OneCell.net.nml @@ -5,8 +5,8 @@ NeuroMLlite parameters: input_amp = 0.99 - - + + Example: OneCell diff --git a/examples/NeuroML/PyNN/SimpleNet.mdf.yaml b/examples/NeuroML/PyNN/SimpleNet.mdf.yaml index 03d480d03..069a77bc4 100644 --- a/examples/NeuroML/PyNN/SimpleNet.mdf.yaml +++ b/examples/NeuroML/PyNN/SimpleNet.mdf.yaml @@ -137,9 +137,9 @@ SimpleNet: \ neuroml2_cell=None, pynn_cell='IF_curr_alpha', arbor_cell=None,\ \ bindsnet_node=None)] is defined in None and in Lems is:\ \ Component, id: testcell, type: IF_curr_alpha,\n parameters:\ - \ {'cm': '1.', 'i_offset': 0.0, 'tau_syn_E': '20.', 'tau_syn_I':\ - \ '0.5', 'v_init': '-65.', 'tau_m': '20.', 'tau_refrac': 5,\ - \ 'v_reset': '-65.', 'v_rest': '-65.', 'v_thresh': '-50.'}\n\ + \ {'cm': '1.0', 'i_offset': 0.0, 'tau_syn_E': '20.0', 'tau_syn_I':\ + \ '0.5', 'v_init': '-65.0', 'tau_m': '20.0', 'tau_refrac':\ + \ 5, 'v_reset': '-65.0', 'v_rest': '-65.0', 
'v_thresh': '-50.0'}\n\ \ parent: None\n" pop1: metadata: @@ -274,9 +274,9 @@ SimpleNet: \ neuroml2_cell=None, pynn_cell='IF_curr_alpha', arbor_cell=None,\ \ bindsnet_node=None)] is defined in None and in Lems is:\ \ Component, id: testcell, type: IF_curr_alpha,\n parameters:\ - \ {'cm': '1.', 'i_offset': 0.0, 'tau_syn_E': '20.', 'tau_syn_I':\ - \ '0.5', 'v_init': '-65.', 'tau_m': '20.', 'tau_refrac': 5,\ - \ 'v_reset': '-65.', 'v_rest': '-65.', 'v_thresh': '-50.'}\n\ + \ {'cm': '1.0', 'i_offset': 0.0, 'tau_syn_E': '20.0', 'tau_syn_I':\ + \ '0.5', 'v_init': '-65.0', 'tau_m': '20.0', 'tau_refrac':\ + \ 5, 'v_reset': '-65.0', 'v_rest': '-65.0', 'v_thresh': '-50.0'}\n\ \ parent: None\n" proj0_ampaSyn: parameters: diff --git a/examples/NeuroML/PyNN/SimpleNet.net.nml b/examples/NeuroML/PyNN/SimpleNet.net.nml index fd1d60972..3788e7fe8 100644 --- a/examples/NeuroML/PyNN/SimpleNet.net.nml +++ b/examples/NeuroML/PyNN/SimpleNet.net.nml @@ -5,8 +5,8 @@ NeuroMLlite parameters: input_amp = 0.99 - - + + Example: SimpleNet diff --git a/examples/ONNX/ab.json b/examples/ONNX/ab.json index 35add3eef..71bcd02fe 100644 --- a/examples/ONNX/ab.json +++ b/examples/ONNX/ab.json @@ -1,11 +1,11 @@ { "ONNX Model": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { - "torch-jit-export": { + "torch_jit": { "nodes": { - "Add_1": { + "/A/Add": { "input_ports": { "input": { "shape": [ @@ -19,7 +19,7 @@ "B": { "value": 1.0 }, - "Add_1": { + "/A/Add": { "function": "onnx::Add", "args": { "A": "input", @@ -28,14 +28,14 @@ } }, "output_ports": { - "onnx_Mul_2": { - "value": "Add_1" + "_A_Add_output_0": { + "value": "/A/Add" } } }, - "Mul_3": { + "/B/Mul": { "input_ports": { - "onnx_Mul_2": { + "_A_Add_output_0": { "shape": [ 2, 3 @@ -47,27 +47,27 @@ "B": { "value": 5.0 }, - "Mul_3": { + "/B/Mul": { "function": "onnx::Mul", "args": { - "A": "onnx_Mul_2", + "A": "_A_Add_output_0", "B": "B" } } }, "output_ports": { "_4": { - "value": "Mul_3" + "value": "/B/Mul" } } } }, "edges": { - "Add_1.onnx_Mul_2_Mul_3.onnx_Mul_2": { - "sender": "Add_1", - "receiver": "Mul_3", - "sender_port": "onnx_Mul_2", - "receiver_port": "onnx_Mul_2" + "/A/Add._A_Add_output_0_/B/Mul._A_Add_output_0": { + "sender": "/A/Add", + "receiver": "/B/Mul", + "sender_port": "_A_Add_output_0", + "receiver_port": "_A_Add_output_0" } } } diff --git a/examples/ONNX/ab.png b/examples/ONNX/ab.png index d45792450..9571df211 100644 Binary files a/examples/ONNX/ab.png and b/examples/ONNX/ab.png differ diff --git a/examples/ONNX/ab.yaml b/examples/ONNX/ab.yaml index a0aa973b8..0b865a360 100644 --- a/examples/ONNX/ab.yaml +++ b/examples/ONNX/ab.yaml @@ -1,10 +1,10 @@ ONNX Model: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: - torch-jit-export: + torch_jit: nodes: - Add_1: + /A/Add: input_ports: input: shape: @@ -14,17 +14,17 @@ ONNX Model: parameters: B: value: 1.0 - Add_1: + /A/Add: function: onnx::Add args: A: input B: B output_ports: - onnx_Mul_2: - value: Add_1 - Mul_3: + _A_Add_output_0: + value: /A/Add + /B/Mul: input_ports: - onnx_Mul_2: + _A_Add_output_0: shape: - 2 - 3 @@ -32,17 +32,17 @@ ONNX Model: parameters: B: value: 5.0 - Mul_3: + /B/Mul: function: onnx::Mul args: - A: onnx_Mul_2 + A: _A_Add_output_0 B: B output_ports: _4: - value: Mul_3 + value: /B/Mul edges: - Add_1.onnx_Mul_2_Mul_3.onnx_Mul_2: - sender: Add_1 - receiver: Mul_3 - sender_port: onnx_Mul_2 - 
receiver_port: onnx_Mul_2 + /A/Add._A_Add_output_0_/B/Mul._A_Add_output_0: + sender: /A/Add + receiver: /B/Mul + sender_port: _A_Add_output_0 + receiver_port: _A_Add_output_0 diff --git a/examples/ONNX/abc.json b/examples/ONNX/abc.json index 882e62776..aff9bbcf7 100644 --- a/examples/ONNX/abc.json +++ b/examples/ONNX/abc.json @@ -1,11 +1,11 @@ { "ONNX Model": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { - "torch-jit-export": { + "torch_jit": { "nodes": { - "Add_1": { + "/A/Add": { "input_ports": { "input": { "shape": [ @@ -19,7 +19,7 @@ "B": { "value": 1.0 }, - "Add_1": { + "/A/Add": { "function": "onnx::Add", "args": { "A": "input", @@ -28,8 +28,8 @@ } }, "output_ports": { - "onnx_Loop_3": { - "value": "Add_1" + "_A_Add_output_0": { + "value": "/A/Add" } } }, @@ -41,7 +41,7 @@ ], "type": "float" }, - "onnx_Loop_3": { + "_A_Add_output_0": { "shape": [ 2, 3 @@ -52,12 +52,12 @@ "parameters": { "body": { "value": { - "graph_torch-jit-export1": { - "id": "torch-jit-export1", + "graph_torch_jit1": { + "id": "torch_jit1", "nodes": { "Div_5": { "input_ports": { - "x_11": { + "x.11": { "shape": [ 2, 3 @@ -72,7 +72,7 @@ "Div_5": { "function": "onnx::Div", "args": { - "A": "x_11", + "A": "x.11", "B": "B" } } @@ -113,19 +113,19 @@ "args": { "M": "B_loop_count", "cond": "cond", - "v_initial": "onnx_Loop_3" + "v_initial": "_A_Add_output_0" } } }, "output_ports": { - "x_3": { + "x.3": { "value": "Loop_3" } } }, - "Mul_8": { + "/C/Mul": { "input_ports": { - "x_3": { + "x.3": { "shape": [ 1 ], @@ -136,33 +136,33 @@ "B": { "value": 100.0 }, - "Mul_8": { + "/C/Mul": { "function": "onnx::Mul", "args": { - "A": "x_3", + "A": "x.3", "B": "B" } } }, "output_ports": { "_13": { - "value": "Mul_8" + "value": "/C/Mul" } } } }, "edges": { - "Add_1.onnx_Loop_3_Loop_3.onnx_Loop_3": { - "sender": "Add_1", + "/A/Add._A_Add_output_0_Loop_3._A_Add_output_0": { + "sender": "/A/Add", "receiver": "Loop_3", - "sender_port": "onnx_Loop_3", - "receiver_port": "onnx_Loop_3" + "sender_port": "_A_Add_output_0", + "receiver_port": "_A_Add_output_0" }, - "Loop_3.x_3_Mul_8.x_3": { + "Loop_3.x.3_/C/Mul.x.3": { "sender": "Loop_3", - "receiver": "Mul_8", - "sender_port": "x_3", - "receiver_port": "x_3" + "receiver": "/C/Mul", + "sender_port": "x.3", + "receiver_port": "x.3" } } } diff --git a/examples/ONNX/abc.yaml b/examples/ONNX/abc.yaml index deaee941d..521607228 100644 --- a/examples/ONNX/abc.yaml +++ b/examples/ONNX/abc.yaml @@ -1,10 +1,10 @@ ONNX Model: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: - torch-jit-export: + torch_jit: nodes: - Add_1: + /A/Add: input_ports: input: shape: @@ -14,21 +14,21 @@ ONNX Model: parameters: B: value: 1.0 - Add_1: + /A/Add: function: onnx::Add args: A: input B: B output_ports: - onnx_Loop_3: - value: Add_1 + _A_Add_output_0: + value: /A/Add Loop_3: input_ports: B_loop_count: shape: - 1 type: float - onnx_Loop_3: + _A_Add_output_0: shape: - 2 - 3 @@ -36,12 +36,12 @@ ONNX Model: parameters: body: value: - graph_torch-jit-export1: - id: torch-jit-export1 + graph_torch_jit1: + id: torch_jit1 nodes: Div_5: input_ports: - x_11: + x.11: shape: - 2 - 3 @@ -52,7 +52,7 @@ ONNX Model: Div_5: function: onnx::Div args: - A: x_11 + A: x.11 B: B output_ports: x: @@ -75,35 +75,35 @@ ONNX Model: args: M: B_loop_count cond: cond - v_initial: onnx_Loop_3 + v_initial: _A_Add_output_0 output_ports: - x_3: + x.3: value: 
Loop_3 - Mul_8: + /C/Mul: input_ports: - x_3: + x.3: shape: - 1 type: float parameters: B: value: 100.0 - Mul_8: + /C/Mul: function: onnx::Mul args: - A: x_3 + A: x.3 B: B output_ports: _13: - value: Mul_8 + value: /C/Mul edges: - Add_1.onnx_Loop_3_Loop_3.onnx_Loop_3: - sender: Add_1 + /A/Add._A_Add_output_0_Loop_3._A_Add_output_0: + sender: /A/Add receiver: Loop_3 - sender_port: onnx_Loop_3 - receiver_port: onnx_Loop_3 - Loop_3.x_3_Mul_8.x_3: + sender_port: _A_Add_output_0 + receiver_port: _A_Add_output_0 + Loop_3.x.3_/C/Mul.x.3: sender: Loop_3 - receiver: Mul_8 - sender_port: x_3 - receiver_port: x_3 + receiver: /C/Mul + sender_port: x.3 + receiver_port: x.3 diff --git a/examples/ONNX/abcd.json b/examples/ONNX/abcd.json index ce0de65b1..34c943e38 100644 --- a/examples/ONNX/abcd.json +++ b/examples/ONNX/abcd.json @@ -1,11 +1,11 @@ { "ONNX Model": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { - "torch-jit-export": { + "torch_jit": { "nodes": { - "Mul_1": { + "/A/Mul": { "input_ports": { "input": { "shape": [ @@ -23,7 +23,7 @@ ] ] }, - "Mul_1": { + "/A/Mul": { "function": "onnx::Mul", "args": { "A": "A", @@ -32,14 +32,14 @@ } }, "output_ports": { - "onnx_Add_2": { - "value": "Mul_1" + "_A_Mul_output_0": { + "value": "/A/Mul" } } }, - "Add_3": { + "/A/Add": { "input_ports": { - "onnx_Add_2": { + "_A_Mul_output_0": { "shape": [ 1, 1 @@ -55,23 +55,23 @@ ] ] }, - "Add_3": { + "/A/Add": { "function": "onnx::Add", "args": { - "A": "onnx_Add_2", + "A": "_A_Mul_output_0", "B": "B" } } }, "output_ports": { - "a": { - "value": "Add_3" + "_A_Add_output_0": { + "value": "/A/Add" } } }, - "Mul_5": { + "/B/Mul": { "input_ports": { - "a": { + "_A_Add_output_0": { "shape": [ 1, 1 @@ -87,23 +87,23 @@ ] ] }, - "Mul_5": { + "/B/Mul": { "function": "onnx::Mul", "args": { "A": "A", - "B": "a" + "B": "_A_Add_output_0" } } }, "output_ports": { - "onnx_Add_6": { - "value": "Mul_5" + "_B_Mul_output_0": { + "value": "/B/Mul" } } }, - "Add_7": { + "/B/Add": { "input_ports": { - "onnx_Add_6": { + "_B_Mul_output_0": { "shape": [ 1, 1 @@ -119,23 +119,23 @@ ] ] }, - "Add_7": { + "/B/Add": { "function": "onnx::Add", "args": { - "A": "onnx_Add_6", + "A": "_B_Mul_output_0", "B": "B" } } }, "output_ports": { - "b": { - "value": "Add_7" + "_B_Add_output_0": { + "value": "/B/Add" } } }, - "Mul_9": { + "/C/Mul": { "input_ports": { - "b": { + "_B_Add_output_0": { "shape": [ 1, 1 @@ -151,23 +151,23 @@ ] ] }, - "Mul_9": { + "/C/Mul": { "function": "onnx::Mul", "args": { "A": "A", - "B": "b" + "B": "_B_Add_output_0" } } }, "output_ports": { - "onnx_Add_10": { - "value": "Mul_9" + "_C_Mul_output_0": { + "value": "/C/Mul" } } }, - "Add_11": { + "/C/Add": { "input_ports": { - "onnx_Add_10": { + "_C_Mul_output_0": { "shape": [ 1, 1 @@ -183,23 +183,23 @@ ] ] }, - "Add_11": { + "/C/Add": { "function": "onnx::Add", "args": { - "A": "onnx_Add_10", + "A": "_C_Mul_output_0", "B": "B" } } }, "output_ports": { "c": { - "value": "Add_11" + "value": "/C/Add" } } }, - "Add_13": { + "/D/Add": { "input_ports": { - "onnx_Add_10": { + "_C_Mul_output_0": { "shape": [ 1, 1 @@ -215,57 +215,57 @@ ] ] }, - "Add_13": { + "/D/Add": { "function": "onnx::Add", "args": { - "A": "onnx_Add_10", + "A": "_C_Mul_output_0", "B": "B" } } }, "output_ports": { "d": { - "value": "Add_13" + "value": "/D/Add" } } } }, "edges": { - "Mul_1.onnx_Add_2_Add_3.onnx_Add_2": { - "sender": "Mul_1", - "receiver": "Add_3", - "sender_port": "onnx_Add_2", - "receiver_port": 
"onnx_Add_2" + "/A/Mul._A_Mul_output_0_/A/Add._A_Mul_output_0": { + "sender": "/A/Mul", + "receiver": "/A/Add", + "sender_port": "_A_Mul_output_0", + "receiver_port": "_A_Mul_output_0" }, - "Add_3.a_Mul_5.a": { - "sender": "Add_3", - "receiver": "Mul_5", - "sender_port": "a", - "receiver_port": "a" + "/A/Add._A_Add_output_0_/B/Mul._A_Add_output_0": { + "sender": "/A/Add", + "receiver": "/B/Mul", + "sender_port": "_A_Add_output_0", + "receiver_port": "_A_Add_output_0" }, - "Mul_5.onnx_Add_6_Add_7.onnx_Add_6": { - "sender": "Mul_5", - "receiver": "Add_7", - "sender_port": "onnx_Add_6", - "receiver_port": "onnx_Add_6" + "/B/Mul._B_Mul_output_0_/B/Add._B_Mul_output_0": { + "sender": "/B/Mul", + "receiver": "/B/Add", + "sender_port": "_B_Mul_output_0", + "receiver_port": "_B_Mul_output_0" }, - "Add_7.b_Mul_9.b": { - "sender": "Add_7", - "receiver": "Mul_9", - "sender_port": "b", - "receiver_port": "b" + "/B/Add._B_Add_output_0_/C/Mul._B_Add_output_0": { + "sender": "/B/Add", + "receiver": "/C/Mul", + "sender_port": "_B_Add_output_0", + "receiver_port": "_B_Add_output_0" }, - "Mul_9.onnx_Add_10_Add_11.onnx_Add_10": { - "sender": "Mul_9", - "receiver": "Add_11", - "sender_port": "onnx_Add_10", - "receiver_port": "onnx_Add_10" + "/C/Mul._C_Mul_output_0_/C/Add._C_Mul_output_0": { + "sender": "/C/Mul", + "receiver": "/C/Add", + "sender_port": "_C_Mul_output_0", + "receiver_port": "_C_Mul_output_0" }, - "Mul_9.onnx_Add_10_Add_13.onnx_Add_10": { - "sender": "Mul_9", - "receiver": "Add_13", - "sender_port": "onnx_Add_10", - "receiver_port": "onnx_Add_10" + "/C/Mul._C_Mul_output_0_/D/Add._C_Mul_output_0": { + "sender": "/C/Mul", + "receiver": "/D/Add", + "sender_port": "_C_Mul_output_0", + "receiver_port": "_C_Mul_output_0" } } } diff --git a/examples/ONNX/abcd.yaml b/examples/ONNX/abcd.yaml index 6ef3114f3..bbe185f1e 100644 --- a/examples/ONNX/abcd.yaml +++ b/examples/ONNX/abcd.yaml @@ -1,10 +1,10 @@ ONNX Model: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: - torch-jit-export: + torch_jit: nodes: - Mul_1: + /A/Mul: input_ports: input: shape: @@ -15,17 +15,17 @@ ONNX Model: A: value: - - 2.0 - Mul_1: + /A/Mul: function: onnx::Mul args: A: A B: input output_ports: - onnx_Add_2: - value: Mul_1 - Add_3: + _A_Mul_output_0: + value: /A/Mul + /A/Add: input_ports: - onnx_Add_2: + _A_Mul_output_0: shape: - 1 - 1 @@ -34,17 +34,17 @@ ONNX Model: B: value: - - 2.0 - Add_3: + /A/Add: function: onnx::Add args: - A: onnx_Add_2 + A: _A_Mul_output_0 B: B output_ports: - a: - value: Add_3 - Mul_5: + _A_Add_output_0: + value: /A/Add + /B/Mul: input_ports: - a: + _A_Add_output_0: shape: - 1 - 1 @@ -53,17 +53,17 @@ ONNX Model: A: value: - - 2.0 - Mul_5: + /B/Mul: function: onnx::Mul args: A: A - B: a + B: _A_Add_output_0 output_ports: - onnx_Add_6: - value: Mul_5 - Add_7: + _B_Mul_output_0: + value: /B/Mul + /B/Add: input_ports: - onnx_Add_6: + _B_Mul_output_0: shape: - 1 - 1 @@ -72,17 +72,17 @@ ONNX Model: B: value: - - 2.0 - Add_7: + /B/Add: function: onnx::Add args: - A: onnx_Add_6 + A: _B_Mul_output_0 B: B output_ports: - b: - value: Add_7 - Mul_9: + _B_Add_output_0: + value: /B/Add + /C/Mul: input_ports: - b: + _B_Add_output_0: shape: - 1 - 1 @@ -91,17 +91,17 @@ ONNX Model: A: value: - - 2.0 - Mul_9: + /C/Mul: function: onnx::Mul args: A: A - B: b + B: _B_Add_output_0 output_ports: - onnx_Add_10: - value: Mul_9 - Add_11: + _C_Mul_output_0: + value: /C/Mul + /C/Add: input_ports: - onnx_Add_10: + _C_Mul_output_0: shape: - 1 - 1 @@ 
-110,17 +110,17 @@ ONNX Model: B: value: - - 2.0 - Add_11: + /C/Add: function: onnx::Add args: - A: onnx_Add_10 + A: _C_Mul_output_0 B: B output_ports: c: - value: Add_11 - Add_13: + value: /C/Add + /D/Add: input_ports: - onnx_Add_10: + _C_Mul_output_0: shape: - 1 - 1 @@ -129,42 +129,42 @@ ONNX Model: B: value: - - 2.0 - Add_13: + /D/Add: function: onnx::Add args: - A: onnx_Add_10 + A: _C_Mul_output_0 B: B output_ports: d: - value: Add_13 + value: /D/Add edges: - Mul_1.onnx_Add_2_Add_3.onnx_Add_2: - sender: Mul_1 - receiver: Add_3 - sender_port: onnx_Add_2 - receiver_port: onnx_Add_2 - Add_3.a_Mul_5.a: - sender: Add_3 - receiver: Mul_5 - sender_port: a - receiver_port: a - Mul_5.onnx_Add_6_Add_7.onnx_Add_6: - sender: Mul_5 - receiver: Add_7 - sender_port: onnx_Add_6 - receiver_port: onnx_Add_6 - Add_7.b_Mul_9.b: - sender: Add_7 - receiver: Mul_9 - sender_port: b - receiver_port: b - Mul_9.onnx_Add_10_Add_11.onnx_Add_10: - sender: Mul_9 - receiver: Add_11 - sender_port: onnx_Add_10 - receiver_port: onnx_Add_10 - Mul_9.onnx_Add_10_Add_13.onnx_Add_10: - sender: Mul_9 - receiver: Add_13 - sender_port: onnx_Add_10 - receiver_port: onnx_Add_10 + /A/Mul._A_Mul_output_0_/A/Add._A_Mul_output_0: + sender: /A/Mul + receiver: /A/Add + sender_port: _A_Mul_output_0 + receiver_port: _A_Mul_output_0 + /A/Add._A_Add_output_0_/B/Mul._A_Add_output_0: + sender: /A/Add + receiver: /B/Mul + sender_port: _A_Add_output_0 + receiver_port: _A_Add_output_0 + /B/Mul._B_Mul_output_0_/B/Add._B_Mul_output_0: + sender: /B/Mul + receiver: /B/Add + sender_port: _B_Mul_output_0 + receiver_port: _B_Mul_output_0 + /B/Add._B_Add_output_0_/C/Mul._B_Add_output_0: + sender: /B/Add + receiver: /C/Mul + sender_port: _B_Add_output_0 + receiver_port: _B_Add_output_0 + /C/Mul._C_Mul_output_0_/C/Add._C_Mul_output_0: + sender: /C/Mul + receiver: /C/Add + sender_port: _C_Mul_output_0 + receiver_port: _C_Mul_output_0 + /C/Mul._C_Mul_output_0_/D/Add._C_Mul_output_0: + sender: /C/Mul + receiver: /D/Add + sender_port: _C_Mul_output_0 + receiver_port: _C_Mul_output_0 diff --git a/examples/ONNX/simple_ab.py b/examples/ONNX/simple_ab.py index 3b4aa5d39..3418dccdb 100644 --- a/examples/ONNX/simple_ab.py +++ b/examples/ONNX/simple_ab.py @@ -100,7 +100,7 @@ def main(): for t in test_values: print("===================\nEvaluating MDF model with input: %s" % t) eg.evaluate(initializer={"input": t}) - print("Output: %s" % eg.enodes["Mul_3"].evaluable_outputs["_4"].curr_value) + print("Output: %s" % eg.enodes["/B/Mul"].evaluable_outputs["_4"].curr_value) if __name__ == "__main__": diff --git a/examples/PyTorch/MDF_PyTorch/ABCD.onnx b/examples/PyTorch/MDF_PyTorch/ABCD.onnx index 920d4cee4..a952f261d 100644 Binary files a/examples/PyTorch/MDF_PyTorch/ABCD.onnx and b/examples/PyTorch/MDF_PyTorch/ABCD.onnx differ diff --git a/examples/PyTorch/MDF_PyTorch/ABCD_pytorch.py b/examples/PyTorch/MDF_PyTorch/ABCD_pytorch.py index 24412e90e..ad29cc008 100644 --- a/examples/PyTorch/MDF_PyTorch/ABCD_pytorch.py +++ b/examples/PyTorch/MDF_PyTorch/ABCD_pytorch.py @@ -1,5 +1,5 @@ """ -This script has been generated by modeci_mdf v0.4.8. +This script has been generated by modeci_mdf v0.4.9. It is an export of a MDF model (mdf.s - MDF stateful, i.e. 
full MDF allowing stateful parameters) to PyTorch """ diff --git a/examples/PyTorch/MDF_PyTorch/Arrays.onnx b/examples/PyTorch/MDF_PyTorch/Arrays.onnx index e25dae1b7..aec1faf1e 100644 Binary files a/examples/PyTorch/MDF_PyTorch/Arrays.onnx and b/examples/PyTorch/MDF_PyTorch/Arrays.onnx differ diff --git a/examples/PyTorch/MDF_PyTorch/Arrays_pytorch.py b/examples/PyTorch/MDF_PyTorch/Arrays_pytorch.py index bf6e9c376..ea751284a 100644 --- a/examples/PyTorch/MDF_PyTorch/Arrays_pytorch.py +++ b/examples/PyTorch/MDF_PyTorch/Arrays_pytorch.py @@ -1,5 +1,5 @@ """ -This script has been generated by modeci_mdf v0.4.8. +This script has been generated by modeci_mdf v0.4.9. It is an export of a MDF model (mdf.s - MDF stateful, i.e. full MDF allowing stateful parameters) to PyTorch """ diff --git a/examples/PyTorch/MDF_PyTorch/Simple.onnx b/examples/PyTorch/MDF_PyTorch/Simple.onnx index ae6a1ac85..46fefcd48 100644 Binary files a/examples/PyTorch/MDF_PyTorch/Simple.onnx and b/examples/PyTorch/MDF_PyTorch/Simple.onnx differ diff --git a/examples/PyTorch/MDF_PyTorch/Simple_pytorch.py b/examples/PyTorch/MDF_PyTorch/Simple_pytorch.py index 26841fe8a..9335ca36e 100644 --- a/examples/PyTorch/MDF_PyTorch/Simple_pytorch.py +++ b/examples/PyTorch/MDF_PyTorch/Simple_pytorch.py @@ -1,5 +1,5 @@ """ -This script has been generated by modeci_mdf v0.4.8. +This script has been generated by modeci_mdf v0.4.9. It is an export of a MDF model (mdf.s - MDF stateful, i.e. full MDF allowing stateful parameters) to PyTorch """ diff --git a/examples/PyTorch/PyTorch_MDF/benchmark_script/squeezenet1_1_benchmark.json b/examples/PyTorch/PyTorch_MDF/benchmark_script/squeezenet1_1_benchmark.json index 2d2165a9d..f49ac8cb6 100644 --- a/examples/PyTorch/PyTorch_MDF/benchmark_script/squeezenet1_1_benchmark.json +++ b/examples/PyTorch/PyTorch_MDF/benchmark_script/squeezenet1_1_benchmark.json @@ -2,9 +2,9 @@ { "model_name": "squeezenet1_1", "model_type": "SqueezeNet", - "pytorch_time": 0.0739, + "pytorch_time": 0.0743, "pytorch_predictions": 10, - "mdf_time": 0.6941, + "mdf_time": 0.6978, "mdf_predictions": 10, "node density": 66, "mdf : pytorch ratio": "9.39" diff --git a/examples/PyTorch/inception.json b/examples/PyTorch/inception.json index 138c62051..89a06e624 100644 --- a/examples/PyTorch/inception.json +++ b/examples/PyTorch/inception.json @@ -1,7 +1,7 @@ { "InceptionBlocks": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "InceptionBlocksGraph": { "nodes": { @@ -117,7 +117,12 @@ "output_ports": { "_99": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, @@ -136,7 +141,12 @@ "type": "float32" }, "_99": { - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { @@ -203,7 +213,13 @@ "output_ports": { "_102": { "value": "onnx_Pad_1", - "type": "None" + "shape": [ + 1, + 64, + 64, + 64 + ], + "type": "float32" } } }, @@ -213,13 +229,16 @@ }, "input_ports": { "_102": { - "type": "None" + "shape": [ + 1, + 64, + 64, + 64 + ], + "type": "float32" } }, "parameters": { - "ceil_mode": { - "value": 0 - }, "kernel_shape": { "value": [ 2, @@ -372,7 +391,12 @@ "output_ports": { "_106": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, @@ -391,7 +415,12 @@ "type": "float32" }, "_106": { - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { @@ -528,7 +557,12 @@ 
"output_ports": { "_110": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, @@ -547,7 +581,12 @@ "type": "float32" }, "_110": { - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { @@ -684,7 +723,12 @@ "output_ports": { "_114": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, @@ -703,7 +747,12 @@ "type": "float32" }, "_114": { - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { @@ -728,10 +777,7 @@ } } }, - "ConcatFromSequence_117": { - "metadata": { - "color": ".3 .3 .2" - }, + "Shape_118": { "parameters": { "_116": { "value": [ @@ -741,58 +787,37 @@ 1 ] }, - "axis": { - "value": 0 - }, - "new_axis": { - "value": 1 - }, - "onnx_ConcatFromSequence_1": { - "function": "onnx::ConcatFromSequence", - "args": { - "input_sequence": "_116" - } - } - }, - "output_ports": { - "_117": { - "value": "onnx_ConcatFromSequence_1", - "type": "None" - } - } - }, - "Shape_119": { - "input_ports": { - "_117": { - "type": "None" - } - }, - "parameters": { "onnx_Shape_1": { "function": "onnx::Shape", "args": { - "data": "_117" + "data": "_116" } } }, "output_ports": { - "_119": { + "_118": { "value": "onnx_Shape_1", - "type": "None" + "shape": [ + 1 + ], + "type": "int64" } } }, - "Gather_120": { + "Gather_119": { "metadata": { "color": ".2 .3 .3" }, "input_ports": { - "_119": { - "type": "None" + "_118": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { - "_118": { + "_117": { "value": [ 0 ] @@ -803,93 +828,111 @@ "onnx_Gather_1": { "function": "onnx::Gather", "args": { - "data": "_119", - "indices": "_118" + "data": "_118", + "indices": "_117" } } }, "output_ports": { - "_120": { + "_119": { "value": "onnx_Gather_1", - "type": "None" + "shape": [ + 1 + ], + "type": "int64" } } }, - "Mul_123": { + "Mul_122": { "parameters": { - "_121": { + "_120": { "value": 4 }, - "_122": { + "_121": { "value": 2 }, "onnx_Mul_1": { "function": "onnx::Mul", "args": { - "A": "_121", - "B": "_122" + "A": "_120", + "B": "_121" } } }, "output_ports": { - "_123": { + "_122": { "value": "onnx_Mul_1", "type": "int64" } } }, - "Sub_124": { + "Sub_123": { "input_ports": { - "_123": { + "_122": { "type": "int64" }, - "_120": { - "type": "None" + "_119": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { "onnx_Sub_1": { "function": "onnx::Sub", "args": { - "A": "_123", - "B": "_120" + "A": "_122", + "B": "_119" } } }, "output_ports": { - "_124": { + "_123": { "value": "onnx_Sub_1", + "shape": [ + 1 + ], "type": "int64" } } }, - "Cast_125": { - "input_ports": { - "_117": { - "type": "None" - } - }, + "Cast_124": { "parameters": { + "_116": { + "value": [ + 0, + 1, + 0, + 1 + ] + }, "to": { "value": 7 }, "onnx_Cast_1": { "function": "onnx::Cast", "args": { - "input": "_117" + "input": "_116" } } }, "output_ports": { - "_125": { + "_124": { "value": "onnx_Cast_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "ConstantOfShape_126": { + "ConstantOfShape_125": { "input_ports": { - "_124": { + "_123": { + "shape": [ + 1 + ], "type": "int64" } }, @@ -902,27 +945,36 @@ "onnx_ConstantOfShape_1": { "function": "onnx::ConstantOfShape", "args": { - "input": "_124" + "input": "_123" } } }, "output_ports": { - "_126": { + "_125": { "value": "onnx_ConstantOfShape_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "Concat_127": { + "Concat_126": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - 
"_125": { - "type": "None" + "_124": { + "shape": [ + 4 + ], + "type": "int64" }, - "_126": { - "type": "None" + "_125": { + "shape": [ + 4 + ], + "type": "int64" } }, "parameters": { @@ -932,28 +984,34 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_125', '_126']" + "inputs": "['_124', '_125']" } } }, "output_ports": { - "_127": { + "_126": { "value": "onnx_Concat_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Reshape_129": { + "Reshape_128": { "metadata": { "color": ".4 .3 .3" }, "input_ports": { - "_127": { - "type": "None" + "_126": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { - "_128": { + "_127": { "value": [ -1, 2 @@ -965,44 +1023,52 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_127", - "shape": "_128" + "data": "_126", + "shape": "_127" } } }, "output_ports": { - "_129": { + "_128": { "value": "onnx_Reshape_1", - "type": "None" + "shape": [ + 4, + 2 + ], + "type": "int64" } } }, - "Slice_134": { + "Slice_133": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_129": { - "type": "None" + "_128": { + "shape": [ + 4, + 2 + ], + "type": "int64" } }, "parameters": { - "_131": { + "_130": { "value": [ -1 ] }, - "_132": { + "_131": { "value": [ -9223372036854775807 ] }, - "_130": { + "_129": { "value": [ 0 ] }, - "_133": { + "_132": { "value": [ -1 ] @@ -1010,28 +1076,36 @@ "onnx_Slice_1": { "function": "onnx::Slice", "args": { - "data": "_129", - "starts": "_131", - "ends": "_132", - "axes": "_130", - "steps": "_133" + "data": "_128", + "starts": "_130", + "ends": "_131", + "axes": "_129", + "steps": "_132" } } }, "output_ports": { - "_134": { + "_133": { "value": "onnx_Slice_1", - "type": "None" + "shape": [ + 4, + 2 + ], + "type": "int64" } } }, - "Transpose_135": { + "Transpose_134": { "metadata": { "color": ".2 .3 .3" }, "input_ports": { - "_134": { - "type": "None" + "_133": { + "shape": [ + 4, + 2 + ], + "type": "int64" } }, "parameters": { @@ -1044,28 +1118,36 @@ "onnx_Transpose_1": { "function": "onnx::Transpose", "args": { - "data": "_134" + "data": "_133" } } }, "output_ports": { - "_135": { + "_134": { "value": "onnx_Transpose_1", - "type": "None" + "shape": [ + 2, + 4 + ], + "type": "int64" } } }, - "Reshape_137": { + "Reshape_136": { "metadata": { "color": ".4 .3 .3" }, "input_ports": { - "_135": { - "type": "None" + "_134": { + "shape": [ + 2, + 4 + ], + "type": "int64" } }, "parameters": { - "_136": { + "_135": { "value": [ -1 ] @@ -1076,22 +1158,28 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_135", - "shape": "_136" + "data": "_134", + "shape": "_135" } } }, "output_ports": { - "_137": { + "_136": { "value": "onnx_Reshape_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Cast_138": { + "Cast_137": { "input_ports": { - "_137": { - "type": "None" + "_136": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { @@ -1101,18 +1189,21 @@ "onnx_Cast_1": { "function": "onnx::Cast", "args": { - "input": "_137" + "input": "_136" } } }, "output_ports": { - "_138": { + "_137": { "value": "onnx_Cast_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Pad_140": { + "Pad_139": { "metadata": { "color": ".3 .3 .2" }, @@ -1126,12 +1217,15 @@ ], "type": "float32" }, - "_138": { - "type": "None" + "_137": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { - "_139": { + "_138": { "value": 0.0 }, "mode": { @@ -1141,41 +1235,29 @@ "function": "onnx::Pad", "args": { "data": "_115", - "pads": "_138", - 
"constant_value": "_139" + "pads": "_137", + "constant_value": "_138" } } }, "output_ports": { - "_140": { + "_139": { "value": "onnx_Pad_1", - "shape": [ - 1, - 48, - 33, - 33 - ], "type": "float32" } } }, - "Pad_142": { + "Pad_141": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_140": { - "shape": [ - 1, - 48, - 33, - 33 - ], + "_139": { "type": "float32" } }, "parameters": { - "_141": { + "_140": { "value": [ 0, 0, @@ -1193,31 +1275,28 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_140", - "pads": "_141" + "data": "_139", + "pads": "_140" } } }, "output_ports": { - "_142": { + "_141": { "value": "onnx_Pad_1", - "type": "None" + "type": "float32" } } }, - "AveragePool_143": { + "AveragePool_142": { "metadata": { "color": ".2 .3 .2" }, "input_ports": { - "_142": { - "type": "None" + "_141": { + "type": "float32" } }, "parameters": { - "ceil_mode": { - "value": 0 - }, "kernel_shape": { "value": [ 2, @@ -1241,24 +1320,18 @@ "onnx_AveragePool_1": { "function": "onnx::AveragePool", "args": { - "X": "_142" + "X": "_141" } } }, "output_ports": { - "_143": { + "_142": { "value": "onnx_AveragePool_1", - "shape": [ - 1, - 48, - 32, - 32 - ], "type": "float32" } } }, - "Conv_144": { + "Conv_143": { "metadata": { "color": ".2 .3 .5" }, @@ -1328,7 +1401,7 @@ } }, "output_ports": { - "_144": { + "_143": { "value": "onnx_Conv_1", "shape": [ 1, @@ -1340,7 +1413,7 @@ } } }, - "Unsqueeze_146": { + "Unsqueeze_145": { "metadata": { "color": ".2 .3 .3" }, @@ -1353,7 +1426,7 @@ } }, "parameters": { - "_145": { + "_144": { "value": [ 1, 2 @@ -1363,23 +1436,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu5_weight", - "axes": "_145" + "axes": "_144" } } }, "output_ports": { - "_146": { + "_145": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_147": { + "PRelu_146": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_144": { + "_143": { "shape": [ 1, 64, @@ -1388,21 +1466,26 @@ ], "type": "float32" }, - "_146": { - "type": "None" + "_145": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_144", - "slope": "_146" + "X": "_143", + "slope": "_145" } } }, "output_ports": { - "_147": { + "_146": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -1414,7 +1497,7 @@ } } }, - "Conv_148": { + "Conv_147": { "metadata": { "color": ".2 .3 .5" }, @@ -1484,7 +1567,7 @@ } }, "output_ports": { - "_148": { + "_147": { "value": "onnx_Conv_1", "shape": [ 1, @@ -1496,7 +1579,7 @@ } } }, - "Unsqueeze_150": { + "Unsqueeze_149": { "metadata": { "color": ".2 .3 .3" }, @@ -1509,7 +1592,7 @@ } }, "parameters": { - "_149": { + "_148": { "value": [ 1, 2 @@ -1519,23 +1602,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu6_weight", - "axes": "_149" + "axes": "_148" } } }, "output_ports": { - "_150": { + "_149": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_151": { + "PRelu_150": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_148": { + "_147": { "shape": [ 1, 48, @@ -1544,21 +1632,26 @@ ], "type": "float32" }, - "_150": { - "type": "None" + "_149": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_148", - "slope": "_150" + "X": "_147", + "slope": "_149" } } }, "output_ports": { - "_151": { + "_150": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -1570,12 +1663,12 @@ 
} } }, - "Conv_152": { + "Conv_151": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_151": { + "_150": { "shape": [ 1, 48, @@ -1633,14 +1726,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_151", + "X": "_150", "W": "conv2d7_weight", "B": "conv2d7_bias" } } }, "output_ports": { - "_152": { + "_151": { "value": "onnx_Conv_1", "shape": [ 1, @@ -1652,7 +1745,7 @@ } } }, - "Unsqueeze_154": { + "Unsqueeze_153": { "metadata": { "color": ".2 .3 .3" }, @@ -1665,7 +1758,7 @@ } }, "parameters": { - "_153": { + "_152": { "value": [ 1, 2 @@ -1675,23 +1768,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu7_weight", - "axes": "_153" + "axes": "_152" } } }, "output_ports": { - "_154": { + "_153": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_155": { + "PRelu_154": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_152": { + "_151": { "shape": [ 1, 64, @@ -1700,21 +1798,26 @@ ], "type": "float32" }, - "_154": { - "type": "None" + "_153": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_152", - "slope": "_154" + "X": "_151", + "slope": "_153" } } }, "output_ports": { - "_155": { + "_154": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -1726,12 +1829,12 @@ } } }, - "Concat_156": { + "Concat_155": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_147": { + "_146": { "shape": [ 1, 64, @@ -1749,7 +1852,7 @@ ], "type": "float32" }, - "_155": { + "_154": { "shape": [ 1, 64, @@ -1758,13 +1861,7 @@ ], "type": "float32" }, - "_143": { - "shape": [ - 1, - 48, - 32, - 32 - ], + "_142": { "type": "float32" } }, @@ -1775,35 +1872,23 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_147', '_111', '_155', '_143']" + "inputs": "['_146', '_111', '_154', '_142']" } } }, "output_ports": { - "_156": { + "_155": { "value": "onnx_Concat_1", - "shape": [ - 1, - 240, - 32, - 32 - ], "type": "float32" } } }, - "Conv_157": { + "Conv_156": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_156": { - "shape": [ - 1, - 240, - 32, - 32 - ], + "_155": { "type": "float32" }, "conv2d8_weight": { @@ -1855,14 +1940,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_156", + "X": "_155", "W": "conv2d8_weight", "B": "conv2d8_bias" } } }, "output_ports": { - "_157": { + "_156": { "value": "onnx_Conv_1", "shape": [ 1, @@ -1874,18 +1959,12 @@ } } }, - "Conv_158": { + "Conv_157": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_156": { - "shape": [ - 1, - 240, - 32, - 32 - ], + "_155": { "type": "float32" }, "conv2d9_weight": { @@ -1937,14 +2016,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_156", + "X": "_155", "W": "conv2d9_weight", "B": "conv2d9_bias" } } }, "output_ports": { - "_158": { + "_157": { "value": "onnx_Conv_1", "shape": [ 1, @@ -1956,18 +2035,12 @@ } } }, - "Conv_159": { + "Conv_158": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_156": { - "shape": [ - 1, - 240, - 32, - 32 - ], + "_155": { "type": "float32" }, "conv2d10_weight": { @@ -2019,14 +2092,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_156", + "X": "_155", "W": "conv2d10_weight", "B": "conv2d10_bias" } } }, "output_ports": { - "_159": { + "_158": { "value": "onnx_Conv_1", "shape": [ 1, @@ -2038,7 +2111,7 @@ } } }, - "Unsqueeze_161": { + "Unsqueeze_160": { "metadata": { "color": ".2 .3 .3" }, @@ -2051,7 +2124,7 @@ } }, "parameters": { - 
"_160": { + "_159": { "value": [ 1, 2 @@ -2061,23 +2134,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu8_weight", - "axes": "_160" + "axes": "_159" } } }, "output_ports": { - "_161": { + "_160": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_162": { + "PRelu_161": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_159": { + "_158": { "shape": [ 1, 64, @@ -2086,21 +2164,26 @@ ], "type": "float32" }, - "_161": { - "type": "None" + "_160": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_159", - "slope": "_161" + "X": "_158", + "slope": "_160" } } }, "output_ports": { - "_162": { + "_161": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -2112,12 +2195,12 @@ } } }, - "Conv_163": { + "Conv_162": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_162": { + "_161": { "shape": [ 1, 64, @@ -2175,14 +2258,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_162", + "X": "_161", "W": "conv2d11_weight", "B": "conv2d11_bias" } } }, "output_ports": { - "_163": { + "_162": { "value": "onnx_Conv_1", "shape": [ 1, @@ -2194,7 +2277,7 @@ } } }, - "Unsqueeze_165": { + "Unsqueeze_164": { "metadata": { "color": ".2 .3 .3" }, @@ -2207,7 +2290,7 @@ } }, "parameters": { - "_164": { + "_163": { "value": [ 1, 2 @@ -2217,23 +2300,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu9_weight", - "axes": "_164" + "axes": "_163" } } }, "output_ports": { - "_165": { + "_164": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_166": { + "PRelu_165": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_163": { + "_162": { "shape": [ 1, 92, @@ -2242,21 +2330,26 @@ ], "type": "float32" }, - "_165": { - "type": "None" + "_164": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_163", - "slope": "_165" + "X": "_162", + "slope": "_164" } } }, "output_ports": { - "_166": { + "_165": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -2268,7 +2361,7 @@ } } }, - "Unsqueeze_168": { + "Unsqueeze_167": { "metadata": { "color": ".2 .3 .3" }, @@ -2281,7 +2374,7 @@ } }, "parameters": { - "_167": { + "_166": { "value": [ 1, 2 @@ -2291,23 +2384,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu10_weight", - "axes": "_167" + "axes": "_166" } } }, "output_ports": { - "_168": { + "_167": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_169": { + "PRelu_168": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_157": { + "_156": { "shape": [ 1, 64, @@ -2316,21 +2414,26 @@ ], "type": "float32" }, - "_168": { - "type": "None" + "_167": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_157", - "slope": "_168" + "X": "_156", + "slope": "_167" } } }, "output_ports": { - "_169": { + "_168": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -2342,12 +2445,9 @@ } } }, - "ConcatFromSequence_171": { - "metadata": { - "color": ".3 .3 .2" - }, + "Shape_170": { "parameters": { - "_170": { + "_116": { "value": [ 0, 1, @@ -2355,58 +2455,37 @@ 1 ] }, - "axis": { - "value": 0 - }, - "new_axis": { - "value": 1 - }, - "onnx_ConcatFromSequence_1": { - "function": "onnx::ConcatFromSequence", + "onnx_Shape_1": { + "function": "onnx::Shape", 
"args": { - "input_sequence": "_170" + "data": "_116" } } }, "output_ports": { - "_171": { - "value": "onnx_ConcatFromSequence_1", - "type": "None" + "_170": { + "value": "onnx_Shape_1", + "shape": [ + 1 + ], + "type": "int64" } } }, - "Shape_173": { - "input_ports": { - "_171": { - "type": "None" - } - }, - "parameters": { - "onnx_Shape_1": { - "function": "onnx::Shape", - "args": { - "data": "_171" - } - } - }, - "output_ports": { - "_173": { - "value": "onnx_Shape_1", - "type": "None" - } - } - }, - "Gather_174": { - "metadata": { - "color": ".2 .3 .3" + "Gather_171": { + "metadata": { + "color": ".2 .3 .3" }, "input_ports": { - "_173": { - "type": "None" + "_170": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { - "_172": { + "_169": { "value": [ 0 ] @@ -2417,93 +2496,111 @@ "onnx_Gather_1": { "function": "onnx::Gather", "args": { - "data": "_173", - "indices": "_172" + "data": "_170", + "indices": "_169" } } }, "output_ports": { - "_174": { + "_171": { "value": "onnx_Gather_1", - "type": "None" + "shape": [ + 1 + ], + "type": "int64" } } }, - "Mul_177": { + "Mul_174": { "parameters": { - "_175": { + "_172": { "value": 4 }, - "_176": { + "_173": { "value": 2 }, "onnx_Mul_1": { "function": "onnx::Mul", "args": { - "A": "_175", - "B": "_176" + "A": "_172", + "B": "_173" } } }, "output_ports": { - "_177": { + "_174": { "value": "onnx_Mul_1", "type": "int64" } } }, - "Sub_178": { + "Sub_175": { "input_ports": { - "_177": { + "_174": { "type": "int64" }, - "_174": { - "type": "None" + "_171": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { "onnx_Sub_1": { "function": "onnx::Sub", "args": { - "A": "_177", - "B": "_174" + "A": "_174", + "B": "_171" } } }, "output_ports": { - "_178": { + "_175": { "value": "onnx_Sub_1", + "shape": [ + 1 + ], "type": "int64" } } }, - "Cast_179": { - "input_ports": { - "_171": { - "type": "None" - } - }, + "Cast_176": { "parameters": { + "_116": { + "value": [ + 0, + 1, + 0, + 1 + ] + }, "to": { "value": 7 }, "onnx_Cast_1": { "function": "onnx::Cast", "args": { - "input": "_171" + "input": "_116" } } }, "output_ports": { - "_179": { + "_176": { "value": "onnx_Cast_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "ConstantOfShape_180": { + "ConstantOfShape_177": { "input_ports": { - "_178": { + "_175": { + "shape": [ + 1 + ], "type": "int64" } }, @@ -2516,27 +2613,36 @@ "onnx_ConstantOfShape_1": { "function": "onnx::ConstantOfShape", "args": { - "input": "_178" + "input": "_175" } } }, "output_ports": { - "_180": { + "_177": { "value": "onnx_ConstantOfShape_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "Concat_181": { + "Concat_178": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_179": { - "type": "None" + "_176": { + "shape": [ + 4 + ], + "type": "int64" }, - "_180": { - "type": "None" + "_177": { + "shape": [ + 4 + ], + "type": "int64" } }, "parameters": { @@ -2546,28 +2652,34 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_179', '_180']" + "inputs": "['_176', '_177']" } } }, "output_ports": { - "_181": { + "_178": { "value": "onnx_Concat_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Reshape_183": { + "Reshape_180": { "metadata": { "color": ".4 .3 .3" }, "input_ports": { - "_181": { - "type": "None" + "_178": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { - "_182": { + "_179": { "value": [ -1, 2 @@ -2579,44 +2691,52 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_181", - "shape": 
"_182" + "data": "_178", + "shape": "_179" } } }, "output_ports": { - "_183": { + "_180": { "value": "onnx_Reshape_1", - "type": "None" + "shape": [ + 4, + 2 + ], + "type": "int64" } } }, - "Slice_188": { + "Slice_185": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_183": { - "type": "None" + "_180": { + "shape": [ + 4, + 2 + ], + "type": "int64" } }, "parameters": { - "_185": { + "_182": { "value": [ -1 ] }, - "_186": { + "_183": { "value": [ -9223372036854775807 ] }, - "_184": { + "_181": { "value": [ 0 ] }, - "_187": { + "_184": { "value": [ -1 ] @@ -2624,28 +2744,36 @@ "onnx_Slice_1": { "function": "onnx::Slice", "args": { - "data": "_183", - "starts": "_185", - "ends": "_186", - "axes": "_184", - "steps": "_187" + "data": "_180", + "starts": "_182", + "ends": "_183", + "axes": "_181", + "steps": "_184" } } }, "output_ports": { - "_188": { + "_185": { "value": "onnx_Slice_1", - "type": "None" + "shape": [ + 4, + 2 + ], + "type": "int64" } } }, - "Transpose_189": { + "Transpose_186": { "metadata": { "color": ".2 .3 .3" }, "input_ports": { - "_188": { - "type": "None" + "_185": { + "shape": [ + 4, + 2 + ], + "type": "int64" } }, "parameters": { @@ -2658,28 +2786,36 @@ "onnx_Transpose_1": { "function": "onnx::Transpose", "args": { - "data": "_188" + "data": "_185" } } }, "output_ports": { - "_189": { + "_186": { "value": "onnx_Transpose_1", - "type": "None" + "shape": [ + 2, + 4 + ], + "type": "int64" } } }, - "Reshape_191": { + "Reshape_188": { "metadata": { "color": ".4 .3 .3" }, "input_ports": { - "_189": { - "type": "None" + "_186": { + "shape": [ + 2, + 4 + ], + "type": "int64" } }, "parameters": { - "_190": { + "_187": { "value": [ -1 ] @@ -2690,22 +2826,28 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_189", - "shape": "_190" + "data": "_186", + "shape": "_187" } } }, "output_ports": { - "_191": { + "_188": { "value": "onnx_Reshape_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Cast_192": { + "Cast_189": { "input_ports": { - "_191": { - "type": "None" + "_188": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { @@ -2715,23 +2857,26 @@ "onnx_Cast_1": { "function": "onnx::Cast", "args": { - "input": "_191" + "input": "_188" } } }, "output_ports": { - "_192": { + "_189": { "value": "onnx_Cast_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Pad_194": { + "Pad_191": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_169": { + "_168": { "shape": [ 1, 64, @@ -2740,12 +2885,15 @@ ], "type": "float32" }, - "_192": { - "type": "None" + "_189": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { - "_193": { + "_190": { "value": 0.0 }, "mode": { @@ -2754,42 +2902,30 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_169", - "pads": "_192", - "constant_value": "_193" + "data": "_168", + "pads": "_189", + "constant_value": "_190" } } }, "output_ports": { - "_194": { + "_191": { "value": "onnx_Pad_1", - "shape": [ - 1, - 64, - 33, - 33 - ], "type": "float32" } } }, - "Pad_196": { + "Pad_193": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_194": { - "shape": [ - 1, - 64, - 33, - 33 - ], + "_191": { "type": "float32" } }, "parameters": { - "_195": { + "_192": { "value": [ 0, 0, @@ -2807,31 +2943,28 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_194", - "pads": "_195" + "data": "_191", + "pads": "_192" } } }, "output_ports": { - "_196": { + "_193": { "value": "onnx_Pad_1", - "type": "None" + "type": "float32" } } }, - "AveragePool_197": 
{ + "AveragePool_194": { "metadata": { "color": ".2 .3 .2" }, "input_ports": { - "_196": { - "type": "None" + "_193": { + "type": "float32" } }, "parameters": { - "ceil_mode": { - "value": 0 - }, "kernel_shape": { "value": [ 2, @@ -2855,35 +2988,23 @@ "onnx_AveragePool_1": { "function": "onnx::AveragePool", "args": { - "X": "_196" + "X": "_193" } } }, "output_ports": { - "_197": { + "_194": { "value": "onnx_AveragePool_1", - "shape": [ - 1, - 64, - 32, - 32 - ], "type": "float32" } } }, - "Conv_198": { + "Conv_195": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_156": { - "shape": [ - 1, - 240, - 32, - 32 - ], + "_155": { "type": "float32" }, "conv2d12_weight": { @@ -2935,14 +3056,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_156", + "X": "_155", "W": "conv2d12_weight", "B": "conv2d12_bias" } } }, "output_ports": { - "_198": { + "_195": { "value": "onnx_Conv_1", "shape": [ 1, @@ -2954,7 +3075,7 @@ } } }, - "Unsqueeze_200": { + "Unsqueeze_197": { "metadata": { "color": ".2 .3 .3" }, @@ -2967,7 +3088,7 @@ } }, "parameters": { - "_199": { + "_196": { "value": [ 1, 2 @@ -2977,23 +3098,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu11_weight", - "axes": "_199" + "axes": "_196" } } }, "output_ports": { - "_200": { + "_197": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_201": { + "PRelu_198": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_198": { + "_195": { "shape": [ 1, 64, @@ -3002,21 +3128,26 @@ ], "type": "float32" }, - "_200": { - "type": "None" + "_197": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_198", - "slope": "_200" + "X": "_195", + "slope": "_197" } } }, "output_ports": { - "_201": { + "_198": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -3028,12 +3159,12 @@ } } }, - "Conv_202": { + "Conv_199": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_201": { + "_198": { "shape": [ 1, 64, @@ -3091,14 +3222,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_201", + "X": "_198", "W": "conv2d13_weight", "B": "conv2d13_bias" } } }, "output_ports": { - "_202": { + "_199": { "value": "onnx_Conv_1", "shape": [ 1, @@ -3110,7 +3241,7 @@ } } }, - "Unsqueeze_204": { + "Unsqueeze_201": { "metadata": { "color": ".2 .3 .3" }, @@ -3123,7 +3254,7 @@ } }, "parameters": { - "_203": { + "_200": { "value": [ 1, 2 @@ -3133,23 +3264,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu12_weight", - "axes": "_203" + "axes": "_200" } } }, "output_ports": { - "_204": { + "_201": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_205": { + "PRelu_202": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_202": { + "_199": { "shape": [ 1, 92, @@ -3158,21 +3294,26 @@ ], "type": "float32" }, - "_204": { - "type": "None" + "_201": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_202", - "slope": "_204" + "X": "_199", + "slope": "_201" } } }, "output_ports": { - "_205": { + "_202": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -3184,7 +3325,7 @@ } } }, - "Unsqueeze_207": { + "Unsqueeze_204": { "metadata": { "color": ".2 .3 .3" }, @@ -3197,7 +3338,7 @@ } }, "parameters": { - "_206": { + "_203": { "value": [ 1, 2 @@ -3207,23 +3348,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu13_weight", - 
"axes": "_206" + "axes": "_203" } } }, "output_ports": { - "_207": { + "_204": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_208": { + "PRelu_205": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_158": { + "_157": { "shape": [ 1, 92, @@ -3232,21 +3378,26 @@ ], "type": "float32" }, - "_207": { - "type": "None" + "_204": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_158", - "slope": "_207" + "X": "_157", + "slope": "_204" } } }, "output_ports": { - "_208": { + "_205": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -3258,12 +3409,12 @@ } } }, - "Concat_209": { + "Concat_206": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_208": { + "_205": { "shape": [ 1, 92, @@ -3272,7 +3423,7 @@ ], "type": "float32" }, - "_205": { + "_202": { "shape": [ 1, 92, @@ -3281,7 +3432,7 @@ ], "type": "float32" }, - "_166": { + "_165": { "shape": [ 1, 92, @@ -3290,13 +3441,7 @@ ], "type": "float32" }, - "_197": { - "shape": [ - 1, - 64, - 32, - 32 - ], + "_194": { "type": "float32" } }, @@ -3307,40 +3452,28 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_208', '_205', '_166', '_197']" + "inputs": "['_205', '_202', '_165', '_194']" } } }, "output_ports": { - "_209": { + "_206": { "value": "onnx_Concat_1", - "shape": [ - 1, - 340, - 32, - 32 - ], "type": "float32" } } }, - "Pad_211": { + "Pad_208": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_209": { - "shape": [ - 1, - 340, - 32, - 32 - ], + "_206": { "type": "float32" } }, "parameters": { - "_210": { + "_207": { "value": [ 0, 0, @@ -3358,31 +3491,28 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_209", - "pads": "_210" + "data": "_206", + "pads": "_207" } } }, "output_ports": { - "_211": { + "_208": { "value": "onnx_Pad_1", - "type": "None" + "type": "float32" } } }, - "AveragePool_212": { + "AveragePool_209": { "metadata": { "color": ".2 .3 .2" }, "input_ports": { - "_211": { - "type": "None" + "_208": { + "type": "float32" } }, "parameters": { - "ceil_mode": { - "value": 0 - }, "kernel_shape": { "value": [ 2, @@ -3406,35 +3536,23 @@ "onnx_AveragePool_1": { "function": "onnx::AveragePool", "args": { - "X": "_211" + "X": "_208" } } }, "output_ports": { - "_212": { + "_209": { "value": "onnx_AveragePool_1", - "shape": [ - 1, - 340, - 16, - 16 - ], "type": "float32" } } }, - "Conv_213": { + "Conv_210": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_212": { - "shape": [ - 1, - 340, - 16, - 16 - ], + "_209": { "type": "float32" }, "conv2d14_weight": { @@ -3486,14 +3604,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_212", + "X": "_209", "W": "conv2d14_weight", "B": "conv2d14_bias" } } }, "output_ports": { - "_213": { + "_210": { "value": "onnx_Conv_1", "shape": [ 1, @@ -3505,7 +3623,7 @@ } } }, - "Unsqueeze_215": { + "Unsqueeze_212": { "metadata": { "color": ".2 .3 .3" }, @@ -3518,7 +3636,7 @@ } }, "parameters": { - "_214": { + "_211": { "value": [ 1, 2 @@ -3528,23 +3646,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu14_weight", - "axes": "_214" + "axes": "_211" } } }, "output_ports": { - "_215": { + "_212": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_216": { + "PRelu_213": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_213": { + "_210": { "shape": [ 1, 92, @@ -3553,21 +3676,26 @@ ], "type": 
"float32" }, - "_215": { - "type": "None" + "_212": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_213", - "slope": "_215" + "X": "_210", + "slope": "_212" } } }, "output_ports": { - "_216": { + "_213": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -3579,12 +3707,12 @@ } } }, - "Conv_217": { + "Conv_214": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_216": { + "_213": { "shape": [ 1, 92, @@ -3642,14 +3770,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_216", + "X": "_213", "W": "conv2d15_weight", "B": "conv2d15_bias" } } }, "output_ports": { - "_217": { + "_214": { "value": "onnx_Conv_1", "shape": [ 1, @@ -3661,7 +3789,7 @@ } } }, - "Unsqueeze_219": { + "Unsqueeze_216": { "metadata": { "color": ".2 .3 .3" }, @@ -3674,7 +3802,7 @@ } }, "parameters": { - "_218": { + "_215": { "value": [ 1, 2 @@ -3684,23 +3812,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu15_weight", - "axes": "_218" + "axes": "_215" } } }, "output_ports": { - "_219": { + "_216": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_220": { + "PRelu_217": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_217": { + "_214": { "shape": [ 1, 128, @@ -3709,21 +3842,26 @@ ], "type": "float32" }, - "_219": { - "type": "None" + "_216": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_217", - "slope": "_219" + "X": "_214", + "slope": "_216" } } }, "output_ports": { - "_220": { + "_217": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -3735,18 +3873,12 @@ } } }, - "Conv_221": { + "Conv_218": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_212": { - "shape": [ - 1, - 340, - 16, - 16 - ], + "_209": { "type": "float32" }, "conv2d16_weight": { @@ -3798,14 +3930,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_212", + "X": "_209", "W": "conv2d16_weight", "B": "conv2d16_bias" } } }, "output_ports": { - "_221": { + "_218": { "value": "onnx_Conv_1", "shape": [ 1, @@ -3817,7 +3949,7 @@ } } }, - "Unsqueeze_223": { + "Unsqueeze_220": { "metadata": { "color": ".2 .3 .3" }, @@ -3830,7 +3962,7 @@ } }, "parameters": { - "_222": { + "_219": { "value": [ 1, 2 @@ -3840,23 +3972,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu16_weight", - "axes": "_222" + "axes": "_219" } } }, "output_ports": { - "_223": { + "_220": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_224": { + "PRelu_221": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_221": { + "_218": { "shape": [ 1, 128, @@ -3865,21 +4002,26 @@ ], "type": "float32" }, - "_223": { - "type": "None" + "_220": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_221", - "slope": "_223" + "X": "_218", + "slope": "_220" } } }, "output_ports": { - "_224": { + "_221": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -3891,18 +4033,12 @@ } } }, - "Conv_225": { + "Conv_222": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_212": { - "shape": [ - 1, - 340, - 16, - 16 - ], + "_209": { "type": "float32" }, "conv2d17_weight": { @@ -3954,14 +4090,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_212", + "X": "_209", "W": "conv2d17_weight", "B": "conv2d17_bias" } } }, "output_ports": { - "_225": { 
+ "_222": { "value": "onnx_Conv_1", "shape": [ 1, @@ -3973,7 +4109,7 @@ } } }, - "Unsqueeze_227": { + "Unsqueeze_224": { "metadata": { "color": ".2 .3 .3" }, @@ -3986,7 +4122,7 @@ } }, "parameters": { - "_226": { + "_223": { "value": [ 1, 2 @@ -3996,23 +4132,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu17_weight", - "axes": "_226" + "axes": "_223" } } }, "output_ports": { - "_227": { + "_224": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_228": { + "PRelu_225": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_225": { + "_222": { "shape": [ 1, 92, @@ -4021,21 +4162,26 @@ ], "type": "float32" }, - "_227": { - "type": "None" + "_224": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_225", - "slope": "_227" + "X": "_222", + "slope": "_224" } } }, "output_ports": { - "_228": { + "_225": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -4047,12 +4193,9 @@ } } }, - "ConcatFromSequence_230": { - "metadata": { - "color": ".3 .3 .2" - }, + "Shape_227": { "parameters": { - "_229": { + "_116": { "value": [ 0, 1, @@ -4060,58 +4203,37 @@ 1 ] }, - "axis": { - "value": 0 - }, - "new_axis": { - "value": 1 - }, - "onnx_ConcatFromSequence_1": { - "function": "onnx::ConcatFromSequence", - "args": { - "input_sequence": "_229" - } - } - }, - "output_ports": { - "_230": { - "value": "onnx_ConcatFromSequence_1", - "type": "None" - } - } - }, - "Shape_232": { - "input_ports": { - "_230": { - "type": "None" - } - }, - "parameters": { "onnx_Shape_1": { "function": "onnx::Shape", "args": { - "data": "_230" + "data": "_116" } } }, "output_ports": { - "_232": { + "_227": { "value": "onnx_Shape_1", - "type": "None" + "shape": [ + 1 + ], + "type": "int64" } } }, - "Gather_233": { + "Gather_228": { "metadata": { "color": ".2 .3 .3" }, "input_ports": { - "_232": { - "type": "None" + "_227": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { - "_231": { + "_226": { "value": [ 0 ] @@ -4122,93 +4244,111 @@ "onnx_Gather_1": { "function": "onnx::Gather", "args": { - "data": "_232", - "indices": "_231" + "data": "_227", + "indices": "_226" } } }, "output_ports": { - "_233": { + "_228": { "value": "onnx_Gather_1", - "type": "None" + "shape": [ + 1 + ], + "type": "int64" } } }, - "Mul_236": { + "Mul_231": { "parameters": { - "_234": { + "_229": { "value": 4 }, - "_235": { + "_230": { "value": 2 }, "onnx_Mul_1": { "function": "onnx::Mul", "args": { - "A": "_234", - "B": "_235" + "A": "_229", + "B": "_230" } } }, "output_ports": { - "_236": { + "_231": { "value": "onnx_Mul_1", "type": "int64" } } }, - "Sub_237": { + "Sub_232": { "input_ports": { - "_236": { + "_231": { "type": "int64" }, - "_233": { - "type": "None" + "_228": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { "onnx_Sub_1": { "function": "onnx::Sub", "args": { - "A": "_236", - "B": "_233" + "A": "_231", + "B": "_228" } } }, "output_ports": { - "_237": { + "_232": { "value": "onnx_Sub_1", + "shape": [ + 1 + ], "type": "int64" } } }, - "Cast_238": { - "input_ports": { - "_230": { - "type": "None" - } - }, + "Cast_233": { "parameters": { + "_116": { + "value": [ + 0, + 1, + 0, + 1 + ] + }, "to": { "value": 7 }, "onnx_Cast_1": { "function": "onnx::Cast", "args": { - "input": "_230" + "input": "_116" } } }, "output_ports": { - "_238": { + "_233": { "value": "onnx_Cast_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "ConstantOfShape_239": { + 
"ConstantOfShape_234": { "input_ports": { - "_237": { + "_232": { + "shape": [ + 1 + ], "type": "int64" } }, @@ -4221,27 +4361,36 @@ "onnx_ConstantOfShape_1": { "function": "onnx::ConstantOfShape", "args": { - "input": "_237" + "input": "_232" } } }, "output_ports": { - "_239": { + "_234": { "value": "onnx_ConstantOfShape_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "Concat_240": { + "Concat_235": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_238": { - "type": "None" + "_233": { + "shape": [ + 4 + ], + "type": "int64" }, - "_239": { - "type": "None" + "_234": { + "shape": [ + 4 + ], + "type": "int64" } }, "parameters": { @@ -4251,28 +4400,34 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_238', '_239']" + "inputs": "['_233', '_234']" } } }, "output_ports": { - "_240": { + "_235": { "value": "onnx_Concat_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Reshape_242": { + "Reshape_237": { "metadata": { "color": ".4 .3 .3" }, "input_ports": { - "_240": { - "type": "None" + "_235": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { - "_241": { + "_236": { "value": [ -1, 2 @@ -4284,44 +4439,52 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_240", - "shape": "_241" + "data": "_235", + "shape": "_236" } } }, "output_ports": { - "_242": { + "_237": { "value": "onnx_Reshape_1", - "type": "None" + "shape": [ + 4, + 2 + ], + "type": "int64" } } }, - "Slice_247": { + "Slice_242": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_242": { - "type": "None" + "_237": { + "shape": [ + 4, + 2 + ], + "type": "int64" } }, "parameters": { - "_244": { + "_239": { "value": [ -1 ] }, - "_245": { + "_240": { "value": [ -9223372036854775807 ] }, - "_243": { + "_238": { "value": [ 0 ] }, - "_246": { + "_241": { "value": [ -1 ] @@ -4329,28 +4492,36 @@ "onnx_Slice_1": { "function": "onnx::Slice", "args": { - "data": "_242", - "starts": "_244", - "ends": "_245", - "axes": "_243", - "steps": "_246" + "data": "_237", + "starts": "_239", + "ends": "_240", + "axes": "_238", + "steps": "_241" } } }, "output_ports": { - "_247": { + "_242": { "value": "onnx_Slice_1", - "type": "None" + "shape": [ + 4, + 2 + ], + "type": "int64" } } }, - "Transpose_248": { + "Transpose_243": { "metadata": { "color": ".2 .3 .3" }, "input_ports": { - "_247": { - "type": "None" + "_242": { + "shape": [ + 4, + 2 + ], + "type": "int64" } }, "parameters": { @@ -4363,28 +4534,36 @@ "onnx_Transpose_1": { "function": "onnx::Transpose", "args": { - "data": "_247" + "data": "_242" } } }, "output_ports": { - "_248": { + "_243": { "value": "onnx_Transpose_1", - "type": "None" + "shape": [ + 2, + 4 + ], + "type": "int64" } } }, - "Reshape_250": { + "Reshape_245": { "metadata": { "color": ".4 .3 .3" }, "input_ports": { - "_248": { - "type": "None" + "_243": { + "shape": [ + 2, + 4 + ], + "type": "int64" } }, "parameters": { - "_249": { + "_244": { "value": [ -1 ] @@ -4395,22 +4574,28 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_248", - "shape": "_249" + "data": "_243", + "shape": "_244" } } }, "output_ports": { - "_250": { + "_245": { "value": "onnx_Reshape_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Cast_251": { + "Cast_246": { "input_ports": { - "_250": { - "type": "None" + "_245": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { @@ -4420,23 +4605,26 @@ "onnx_Cast_1": { "function": "onnx::Cast", "args": { - "input": "_250" + "input": "_245" } } }, 
"output_ports": { - "_251": { + "_246": { "value": "onnx_Cast_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Pad_253": { + "Pad_248": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_228": { + "_225": { "shape": [ 1, 92, @@ -4445,12 +4633,15 @@ ], "type": "float32" }, - "_251": { - "type": "None" + "_246": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { - "_252": { + "_247": { "value": 0.0 }, "mode": { @@ -4459,42 +4650,30 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_228", - "pads": "_251", - "constant_value": "_252" + "data": "_225", + "pads": "_246", + "constant_value": "_247" } } }, "output_ports": { - "_253": { + "_248": { "value": "onnx_Pad_1", - "shape": [ - 1, - 92, - 17, - 17 - ], "type": "float32" } } }, - "Pad_255": { + "Pad_250": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_253": { - "shape": [ - 1, - 92, - 17, - 17 - ], + "_248": { "type": "float32" } }, "parameters": { - "_254": { + "_249": { "value": [ 0, 0, @@ -4512,31 +4691,28 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_253", - "pads": "_254" + "data": "_248", + "pads": "_249" } } }, "output_ports": { - "_255": { + "_250": { "value": "onnx_Pad_1", - "type": "None" + "type": "float32" } } }, - "AveragePool_256": { + "AveragePool_251": { "metadata": { "color": ".2 .3 .2" }, "input_ports": { - "_255": { - "type": "None" + "_250": { + "type": "float32" } }, "parameters": { - "ceil_mode": { - "value": 0 - }, "kernel_shape": { "value": [ 2, @@ -4560,35 +4736,23 @@ "onnx_AveragePool_1": { "function": "onnx::AveragePool", "args": { - "X": "_255" + "X": "_250" } } }, "output_ports": { - "_256": { + "_251": { "value": "onnx_AveragePool_1", - "shape": [ - 1, - 92, - 16, - 16 - ], "type": "float32" } } }, - "Conv_257": { + "Conv_252": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_212": { - "shape": [ - 1, - 340, - 16, - 16 - ], + "_209": { "type": "float32" }, "conv2d18_weight": { @@ -4640,14 +4804,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_212", + "X": "_209", "W": "conv2d18_weight", "B": "conv2d18_bias" } } }, "output_ports": { - "_257": { + "_252": { "value": "onnx_Conv_1", "shape": [ 1, @@ -4659,7 +4823,7 @@ } } }, - "Unsqueeze_259": { + "Unsqueeze_254": { "metadata": { "color": ".2 .3 .3" }, @@ -4672,7 +4836,7 @@ } }, "parameters": { - "_258": { + "_253": { "value": [ 1, 2 @@ -4682,23 +4846,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu18_weight", - "axes": "_258" + "axes": "_253" } } }, "output_ports": { - "_259": { + "_254": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_260": { + "PRelu_255": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_257": { + "_252": { "shape": [ 1, 92, @@ -4707,21 +4876,26 @@ ], "type": "float32" }, - "_259": { - "type": "None" + "_254": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_257", - "slope": "_259" + "X": "_252", + "slope": "_254" } } }, "output_ports": { - "_260": { + "_255": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -4733,12 +4907,12 @@ } } }, - "Conv_261": { + "Conv_256": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_260": { + "_255": { "shape": [ 1, 92, @@ -4796,14 +4970,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_260", + "X": "_255", "W": "conv2d19_weight", "B": "conv2d19_bias" } } }, "output_ports": { - "_261": { + 
"_256": { "value": "onnx_Conv_1", "shape": [ 1, @@ -4815,7 +4989,7 @@ } } }, - "Unsqueeze_263": { + "Unsqueeze_258": { "metadata": { "color": ".2 .3 .3" }, @@ -4828,7 +5002,7 @@ } }, "parameters": { - "_262": { + "_257": { "value": [ 1, 2 @@ -4838,23 +5012,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu19_weight", - "axes": "_262" + "axes": "_257" } } }, "output_ports": { - "_263": { + "_258": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_264": { + "PRelu_259": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_261": { + "_256": { "shape": [ 1, 128, @@ -4863,21 +5042,26 @@ ], "type": "float32" }, - "_263": { - "type": "None" + "_258": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_261", - "slope": "_263" + "X": "_256", + "slope": "_258" } } }, "output_ports": { - "_264": { + "_259": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -4889,12 +5073,12 @@ } } }, - "Concat_265": { + "Concat_260": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_224": { + "_221": { "shape": [ 1, 128, @@ -4903,7 +5087,7 @@ ], "type": "float32" }, - "_264": { + "_259": { "shape": [ 1, 128, @@ -4912,7 +5096,7 @@ ], "type": "float32" }, - "_220": { + "_217": { "shape": [ 1, 128, @@ -4921,13 +5105,7 @@ ], "type": "float32" }, - "_256": { - "shape": [ - 1, - 92, - 16, - 16 - ], + "_251": { "type": "float32" } }, @@ -4938,35 +5116,23 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_224', '_264', '_220', '_256']" + "inputs": "['_221', '_259', '_217', '_251']" } } }, "output_ports": { - "_265": { + "_260": { "value": "onnx_Concat_1", - "shape": [ - 1, - 476, - 16, - 16 - ], "type": "float32" } } }, - "Conv_266": { + "Conv_261": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_265": { - "shape": [ - 1, - 476, - 16, - 16 - ], + "_260": { "type": "float32" }, "conv2d20_weight": { @@ -5018,14 +5184,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_265", + "X": "_260", "W": "conv2d20_weight", "B": "conv2d20_bias" } } }, "output_ports": { - "_266": { + "_261": { "value": "onnx_Conv_1", "shape": [ 1, @@ -5037,7 +5203,7 @@ } } }, - "Unsqueeze_268": { + "Unsqueeze_263": { "metadata": { "color": ".2 .3 .3" }, @@ -5050,7 +5216,7 @@ } }, "parameters": { - "_267": { + "_262": { "value": [ 1, 2 @@ -5060,23 +5226,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu20_weight", - "axes": "_267" + "axes": "_262" } } }, "output_ports": { - "_268": { + "_263": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_269": { + "PRelu_264": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_266": { + "_261": { "shape": [ 1, 92, @@ -5085,21 +5256,26 @@ ], "type": "float32" }, - "_268": { - "type": "None" + "_263": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_266", - "slope": "_268" + "X": "_261", + "slope": "_263" } } }, "output_ports": { - "_269": { + "_264": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -5111,12 +5287,12 @@ } } }, - "Conv_270": { + "Conv_265": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_269": { + "_264": { "shape": [ 1, 92, @@ -5174,14 +5350,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_269", + "X": "_264", "W": "conv2d21_weight", "B": "conv2d21_bias" } } }, "output_ports": { - 
"_270": { + "_265": { "value": "onnx_Conv_1", "shape": [ 1, @@ -5193,7 +5369,7 @@ } } }, - "Unsqueeze_272": { + "Unsqueeze_267": { "metadata": { "color": ".2 .3 .3" }, @@ -5206,7 +5382,7 @@ } }, "parameters": { - "_271": { + "_266": { "value": [ 1, 2 @@ -5216,23 +5392,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu21_weight", - "axes": "_271" + "axes": "_266" } } }, "output_ports": { - "_272": { + "_267": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_273": { + "PRelu_268": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_270": { + "_265": { "shape": [ 1, 128, @@ -5241,21 +5422,26 @@ ], "type": "float32" }, - "_272": { - "type": "None" + "_267": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_270", - "slope": "_272" + "X": "_265", + "slope": "_267" } } }, "output_ports": { - "_273": { + "_268": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -5267,18 +5453,12 @@ } } }, - "Conv_274": { + "Conv_269": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_265": { - "shape": [ - 1, - 476, - 16, - 16 - ], + "_260": { "type": "float32" }, "conv2d22_weight": { @@ -5330,14 +5510,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_265", + "X": "_260", "W": "conv2d22_weight", "B": "conv2d22_bias" } } }, "output_ports": { - "_274": { + "_269": { "value": "onnx_Conv_1", "shape": [ 1, @@ -5349,7 +5529,7 @@ } } }, - "Unsqueeze_276": { + "Unsqueeze_271": { "metadata": { "color": ".2 .3 .3" }, @@ -5362,7 +5542,7 @@ } }, "parameters": { - "_275": { + "_270": { "value": [ 1, 2 @@ -5372,23 +5552,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu22_weight", - "axes": "_275" + "axes": "_270" } } }, "output_ports": { - "_276": { + "_271": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_277": { + "PRelu_272": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_274": { + "_269": { "shape": [ 1, 92, @@ -5397,21 +5582,26 @@ ], "type": "float32" }, - "_276": { - "type": "None" + "_271": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_274", - "slope": "_276" + "X": "_269", + "slope": "_271" } } }, "output_ports": { - "_277": { + "_272": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -5423,12 +5613,9 @@ } } }, - "ConcatFromSequence_279": { - "metadata": { - "color": ".3 .3 .2" - }, + "Shape_274": { "parameters": { - "_278": { + "_116": { "value": [ 0, 1, @@ -5436,58 +5623,37 @@ 1 ] }, - "axis": { - "value": 0 - }, - "new_axis": { - "value": 1 - }, - "onnx_ConcatFromSequence_1": { - "function": "onnx::ConcatFromSequence", - "args": { - "input_sequence": "_278" - } - } - }, - "output_ports": { - "_279": { - "value": "onnx_ConcatFromSequence_1", - "type": "None" - } - } - }, - "Shape_281": { - "input_ports": { - "_279": { - "type": "None" - } - }, - "parameters": { "onnx_Shape_1": { "function": "onnx::Shape", "args": { - "data": "_279" + "data": "_116" } } }, "output_ports": { - "_281": { + "_274": { "value": "onnx_Shape_1", - "type": "None" + "shape": [ + 1 + ], + "type": "int64" } } }, - "Gather_282": { + "Gather_275": { "metadata": { "color": ".2 .3 .3" }, "input_ports": { - "_281": { - "type": "None" + "_274": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { - "_280": { + "_273": { "value": [ 0 ] @@ -5498,93 +5664,111 @@ 
"onnx_Gather_1": { "function": "onnx::Gather", "args": { - "data": "_281", - "indices": "_280" + "data": "_274", + "indices": "_273" } } }, "output_ports": { - "_282": { + "_275": { "value": "onnx_Gather_1", - "type": "None" + "shape": [ + 1 + ], + "type": "int64" } } }, - "Mul_285": { + "Mul_278": { "parameters": { - "_283": { + "_276": { "value": 4 }, - "_284": { + "_277": { "value": 2 }, "onnx_Mul_1": { "function": "onnx::Mul", "args": { - "A": "_283", - "B": "_284" + "A": "_276", + "B": "_277" } } }, "output_ports": { - "_285": { + "_278": { "value": "onnx_Mul_1", "type": "int64" } } }, - "Sub_286": { + "Sub_279": { "input_ports": { - "_285": { + "_278": { "type": "int64" }, - "_282": { - "type": "None" + "_275": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { "onnx_Sub_1": { "function": "onnx::Sub", "args": { - "A": "_285", - "B": "_282" + "A": "_278", + "B": "_275" } } }, "output_ports": { - "_286": { + "_279": { "value": "onnx_Sub_1", + "shape": [ + 1 + ], "type": "int64" } } }, - "Cast_287": { - "input_ports": { - "_279": { - "type": "None" - } - }, + "Cast_280": { "parameters": { + "_116": { + "value": [ + 0, + 1, + 0, + 1 + ] + }, "to": { "value": 7 }, "onnx_Cast_1": { "function": "onnx::Cast", "args": { - "input": "_279" + "input": "_116" } } }, "output_ports": { - "_287": { + "_280": { "value": "onnx_Cast_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "ConstantOfShape_288": { + "ConstantOfShape_281": { "input_ports": { - "_286": { + "_279": { + "shape": [ + 1 + ], "type": "int64" } }, @@ -5597,27 +5781,36 @@ "onnx_ConstantOfShape_1": { "function": "onnx::ConstantOfShape", "args": { - "input": "_286" + "input": "_279" } } }, "output_ports": { - "_288": { + "_281": { "value": "onnx_ConstantOfShape_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "Concat_289": { + "Concat_282": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_287": { - "type": "None" + "_280": { + "shape": [ + 4 + ], + "type": "int64" }, - "_288": { - "type": "None" + "_281": { + "shape": [ + 4 + ], + "type": "int64" } }, "parameters": { @@ -5627,28 +5820,34 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_287', '_288']" + "inputs": "['_280', '_281']" } } }, "output_ports": { - "_289": { + "_282": { "value": "onnx_Concat_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Reshape_291": { + "Reshape_284": { "metadata": { "color": ".4 .3 .3" }, "input_ports": { - "_289": { - "type": "None" + "_282": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { - "_290": { + "_283": { "value": [ -1, 2 @@ -5660,44 +5859,52 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_289", - "shape": "_290" + "data": "_282", + "shape": "_283" } } }, "output_ports": { - "_291": { + "_284": { "value": "onnx_Reshape_1", - "type": "None" + "shape": [ + 4, + 2 + ], + "type": "int64" } } }, - "Slice_296": { + "Slice_289": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_291": { - "type": "None" + "_284": { + "shape": [ + 4, + 2 + ], + "type": "int64" } }, "parameters": { - "_293": { + "_286": { "value": [ -1 ] }, - "_294": { + "_287": { "value": [ -9223372036854775807 ] }, - "_292": { + "_285": { "value": [ 0 ] }, - "_295": { + "_288": { "value": [ -1 ] @@ -5705,28 +5912,36 @@ "onnx_Slice_1": { "function": "onnx::Slice", "args": { - "data": "_291", - "starts": "_293", - "ends": "_294", - "axes": "_292", - "steps": "_295" + "data": "_284", + "starts": "_286", + "ends": "_287", + "axes": 
"_285", + "steps": "_288" } } }, "output_ports": { - "_296": { + "_289": { "value": "onnx_Slice_1", - "type": "None" + "shape": [ + 4, + 2 + ], + "type": "int64" } } }, - "Transpose_297": { + "Transpose_290": { "metadata": { "color": ".2 .3 .3" }, "input_ports": { - "_296": { - "type": "None" + "_289": { + "shape": [ + 4, + 2 + ], + "type": "int64" } }, "parameters": { @@ -5739,28 +5954,36 @@ "onnx_Transpose_1": { "function": "onnx::Transpose", "args": { - "data": "_296" + "data": "_289" } } }, "output_ports": { - "_297": { + "_290": { "value": "onnx_Transpose_1", - "type": "None" + "shape": [ + 2, + 4 + ], + "type": "int64" } } }, - "Reshape_299": { + "Reshape_292": { "metadata": { "color": ".4 .3 .3" }, "input_ports": { - "_297": { - "type": "None" + "_290": { + "shape": [ + 2, + 4 + ], + "type": "int64" } }, "parameters": { - "_298": { + "_291": { "value": [ -1 ] @@ -5771,22 +5994,28 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_297", - "shape": "_298" + "data": "_290", + "shape": "_291" } } }, "output_ports": { - "_299": { + "_292": { "value": "onnx_Reshape_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Cast_300": { + "Cast_293": { "input_ports": { - "_299": { - "type": "None" + "_292": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { @@ -5796,23 +6025,26 @@ "onnx_Cast_1": { "function": "onnx::Cast", "args": { - "input": "_299" + "input": "_292" } } }, "output_ports": { - "_300": { + "_293": { "value": "onnx_Cast_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Pad_302": { + "Pad_295": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_277": { + "_272": { "shape": [ 1, 92, @@ -5821,12 +6053,15 @@ ], "type": "float32" }, - "_300": { - "type": "None" + "_293": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { - "_301": { + "_294": { "value": 0.0 }, "mode": { @@ -5835,42 +6070,30 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_277", - "pads": "_300", - "constant_value": "_301" + "data": "_272", + "pads": "_293", + "constant_value": "_294" } } }, "output_ports": { - "_302": { + "_295": { "value": "onnx_Pad_1", - "shape": [ - 1, - 92, - 17, - 17 - ], "type": "float32" } } }, - "Pad_304": { + "Pad_297": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_302": { - "shape": [ - 1, - 92, - 17, - 17 - ], + "_295": { "type": "float32" } }, "parameters": { - "_303": { + "_296": { "value": [ 0, 0, @@ -5888,31 +6111,28 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_302", - "pads": "_303" + "data": "_295", + "pads": "_296" } } }, "output_ports": { - "_304": { + "_297": { "value": "onnx_Pad_1", - "type": "None" + "type": "float32" } } }, - "AveragePool_305": { + "AveragePool_298": { "metadata": { "color": ".2 .3 .2" }, "input_ports": { - "_304": { - "type": "None" + "_297": { + "type": "float32" } }, "parameters": { - "ceil_mode": { - "value": 0 - }, "kernel_shape": { "value": [ 2, @@ -5936,35 +6156,23 @@ "onnx_AveragePool_1": { "function": "onnx::AveragePool", "args": { - "X": "_304" + "X": "_297" } } }, "output_ports": { - "_305": { + "_298": { "value": "onnx_AveragePool_1", - "shape": [ - 1, - 92, - 16, - 16 - ], "type": "float32" } } }, - "Conv_306": { + "Conv_299": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_265": { - "shape": [ - 1, - 476, - 16, - 16 - ], + "_260": { "type": "float32" }, "conv2d23_weight": { @@ -6016,14 +6224,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_265", + "X": "_260", "W": 
"conv2d23_weight", "B": "conv2d23_bias" } } }, "output_ports": { - "_306": { + "_299": { "value": "onnx_Conv_1", "shape": [ 1, @@ -6035,7 +6243,7 @@ } } }, - "Unsqueeze_308": { + "Unsqueeze_301": { "metadata": { "color": ".2 .3 .3" }, @@ -6048,7 +6256,7 @@ } }, "parameters": { - "_307": { + "_300": { "value": [ 1, 2 @@ -6058,23 +6266,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu23_weight", - "axes": "_307" + "axes": "_300" } } }, "output_ports": { - "_308": { + "_301": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_309": { + "PRelu_302": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_306": { + "_299": { "shape": [ 1, 92, @@ -6083,21 +6296,26 @@ ], "type": "float32" }, - "_308": { - "type": "None" + "_301": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_306", - "slope": "_308" + "X": "_299", + "slope": "_301" } } }, "output_ports": { - "_309": { + "_302": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -6109,12 +6327,12 @@ } } }, - "Conv_310": { + "Conv_303": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_309": { + "_302": { "shape": [ 1, 92, @@ -6172,14 +6390,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_309", + "X": "_302", "W": "conv2d24_weight", "B": "conv2d24_bias" } } }, "output_ports": { - "_310": { + "_303": { "value": "onnx_Conv_1", "shape": [ 1, @@ -6191,7 +6409,7 @@ } } }, - "Unsqueeze_312": { + "Unsqueeze_305": { "metadata": { "color": ".2 .3 .3" }, @@ -6204,7 +6422,7 @@ } }, "parameters": { - "_311": { + "_304": { "value": [ 1, 2 @@ -6214,23 +6432,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu24_weight", - "axes": "_311" + "axes": "_304" } } }, "output_ports": { - "_312": { + "_305": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_313": { + "PRelu_306": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_310": { + "_303": { "shape": [ 1, 128, @@ -6239,21 +6462,26 @@ ], "type": "float32" }, - "_312": { - "type": "None" + "_305": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_310", - "slope": "_312" + "X": "_303", + "slope": "_305" } } }, "output_ports": { - "_313": { + "_306": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -6265,18 +6493,12 @@ } } }, - "Conv_314": { + "Conv_307": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_265": { - "shape": [ - 1, - 476, - 16, - 16 - ], + "_260": { "type": "float32" }, "conv2d25_weight": { @@ -6328,14 +6550,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_265", + "X": "_260", "W": "conv2d25_weight", "B": "conv2d25_bias" } } }, "output_ports": { - "_314": { + "_307": { "value": "onnx_Conv_1", "shape": [ 1, @@ -6347,7 +6569,7 @@ } } }, - "Unsqueeze_316": { + "Unsqueeze_309": { "metadata": { "color": ".2 .3 .3" }, @@ -6360,7 +6582,7 @@ } }, "parameters": { - "_315": { + "_308": { "value": [ 1, 2 @@ -6370,23 +6592,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu25_weight", - "axes": "_315" + "axes": "_308" } } }, "output_ports": { - "_316": { + "_309": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_317": { + "PRelu_310": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_314": { + "_307": { "shape": [ 1, 128, @@ -6395,21 +6622,26 
@@ ], "type": "float32" }, - "_316": { - "type": "None" + "_309": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_314", - "slope": "_316" + "X": "_307", + "slope": "_309" } } }, "output_ports": { - "_317": { + "_310": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -6421,12 +6653,12 @@ } } }, - "Concat_318": { + "Concat_311": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_317": { + "_310": { "shape": [ 1, 128, @@ -6435,7 +6667,7 @@ ], "type": "float32" }, - "_273": { + "_268": { "shape": [ 1, 128, @@ -6444,7 +6676,7 @@ ], "type": "float32" }, - "_313": { + "_306": { "shape": [ 1, 128, @@ -6453,13 +6685,7 @@ ], "type": "float32" }, - "_305": { - "shape": [ - 1, - 92, - 16, - 16 - ], + "_298": { "type": "float32" } }, @@ -6470,40 +6696,28 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_317', '_273', '_313', '_305']" + "inputs": "['_310', '_268', '_306', '_298']" } } }, "output_ports": { - "_318": { + "_311": { "value": "onnx_Concat_1", - "shape": [ - 1, - 476, - 16, - 16 - ], "type": "float32" } } }, - "Pad_320": { + "Pad_313": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_318": { - "shape": [ - 1, - 476, - 16, - 16 - ], + "_311": { "type": "float32" } }, "parameters": { - "_319": { + "_312": { "value": [ 0, 0, @@ -6521,31 +6735,28 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_318", - "pads": "_319" + "data": "_311", + "pads": "_312" } } }, "output_ports": { - "_320": { + "_313": { "value": "onnx_Pad_1", - "type": "None" + "type": "float32" } } }, - "AveragePool_321": { + "AveragePool_314": { "metadata": { "color": ".2 .3 .2" }, "input_ports": { - "_320": { - "type": "None" + "_313": { + "type": "float32" } }, "parameters": { - "ceil_mode": { - "value": 0 - }, "kernel_shape": { "value": [ 2, @@ -6569,35 +6780,23 @@ "onnx_AveragePool_1": { "function": "onnx::AveragePool", "args": { - "X": "_320" + "X": "_313" } } }, "output_ports": { - "_321": { + "_314": { "value": "onnx_AveragePool_1", - "shape": [ - 1, - 476, - 8, - 8 - ], "type": "float32" } } }, - "Conv_322": { + "Conv_315": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_321": { - "shape": [ - 1, - 476, - 8, - 8 - ], + "_314": { "type": "float32" }, "conv2d26_weight": { @@ -6649,14 +6848,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_321", + "X": "_314", "W": "conv2d26_weight", "B": "conv2d26_bias" } } }, "output_ports": { - "_322": { + "_315": { "value": "onnx_Conv_1", "shape": [ 1, @@ -6668,7 +6867,7 @@ } } }, - "Unsqueeze_324": { + "Unsqueeze_317": { "metadata": { "color": ".2 .3 .3" }, @@ -6681,7 +6880,7 @@ } }, "parameters": { - "_323": { + "_316": { "value": [ 1, 2 @@ -6691,23 +6890,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu26_weight", - "axes": "_323" + "axes": "_316" } } }, "output_ports": { - "_324": { + "_317": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_325": { + "PRelu_318": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_322": { + "_315": { "shape": [ 1, 92, @@ -6716,21 +6920,26 @@ ], "type": "float32" }, - "_324": { - "type": "None" + "_317": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_322", - "slope": "_324" + "X": "_315", + "slope": "_317" } } }, "output_ports": { - "_325": { + "_318": { "value": "onnx_PRelu_1", "shape": [ 1, @@ 
-6742,12 +6951,9 @@ } } }, - "ConcatFromSequence_327": { - "metadata": { - "color": ".3 .3 .2" - }, + "Shape_320": { "parameters": { - "_326": { + "_116": { "value": [ 0, 1, @@ -6755,58 +6961,37 @@ 1 ] }, - "axis": { - "value": 0 - }, - "new_axis": { - "value": 1 - }, - "onnx_ConcatFromSequence_1": { - "function": "onnx::ConcatFromSequence", - "args": { - "input_sequence": "_326" - } - } - }, - "output_ports": { - "_327": { - "value": "onnx_ConcatFromSequence_1", - "type": "None" - } - } - }, - "Shape_329": { - "input_ports": { - "_327": { - "type": "None" - } - }, - "parameters": { "onnx_Shape_1": { "function": "onnx::Shape", "args": { - "data": "_327" + "data": "_116" } } }, "output_ports": { - "_329": { + "_320": { "value": "onnx_Shape_1", - "type": "None" + "shape": [ + 1 + ], + "type": "int64" } } }, - "Gather_330": { + "Gather_321": { "metadata": { "color": ".2 .3 .3" }, "input_ports": { - "_329": { - "type": "None" + "_320": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { - "_328": { + "_319": { "value": [ 0 ] @@ -6817,93 +7002,111 @@ "onnx_Gather_1": { "function": "onnx::Gather", "args": { - "data": "_329", - "indices": "_328" + "data": "_320", + "indices": "_319" } } }, "output_ports": { - "_330": { + "_321": { "value": "onnx_Gather_1", - "type": "None" + "shape": [ + 1 + ], + "type": "int64" } } }, - "Mul_333": { + "Mul_324": { "parameters": { - "_331": { + "_322": { "value": 4 }, - "_332": { + "_323": { "value": 2 }, "onnx_Mul_1": { "function": "onnx::Mul", "args": { - "A": "_331", - "B": "_332" + "A": "_322", + "B": "_323" } } }, "output_ports": { - "_333": { + "_324": { "value": "onnx_Mul_1", "type": "int64" } } }, - "Sub_334": { + "Sub_325": { "input_ports": { - "_333": { + "_324": { "type": "int64" }, - "_330": { - "type": "None" + "_321": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { "onnx_Sub_1": { "function": "onnx::Sub", "args": { - "A": "_333", - "B": "_330" + "A": "_324", + "B": "_321" } } }, "output_ports": { - "_334": { + "_325": { "value": "onnx_Sub_1", + "shape": [ + 1 + ], "type": "int64" } } }, - "Cast_335": { - "input_ports": { - "_327": { - "type": "None" - } - }, + "Cast_326": { "parameters": { + "_116": { + "value": [ + 0, + 1, + 0, + 1 + ] + }, "to": { "value": 7 }, "onnx_Cast_1": { "function": "onnx::Cast", "args": { - "input": "_327" + "input": "_116" } } }, "output_ports": { - "_335": { + "_326": { "value": "onnx_Cast_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "ConstantOfShape_336": { + "ConstantOfShape_327": { "input_ports": { - "_334": { + "_325": { + "shape": [ + 1 + ], "type": "int64" } }, @@ -6916,27 +7119,36 @@ "onnx_ConstantOfShape_1": { "function": "onnx::ConstantOfShape", "args": { - "input": "_334" + "input": "_325" } } }, "output_ports": { - "_336": { + "_327": { "value": "onnx_ConstantOfShape_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "Concat_337": { + "Concat_328": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_335": { - "type": "None" + "_326": { + "shape": [ + 4 + ], + "type": "int64" }, - "_336": { - "type": "None" + "_327": { + "shape": [ + 4 + ], + "type": "int64" } }, "parameters": { @@ -6946,28 +7158,34 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_335', '_336']" + "inputs": "['_326', '_327']" } } }, "output_ports": { - "_337": { + "_328": { "value": "onnx_Concat_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Reshape_339": { + "Reshape_330": { "metadata": { "color": ".4 .3 .3" }, 
"input_ports": { - "_337": { - "type": "None" + "_328": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { - "_338": { + "_329": { "value": [ -1, 2 @@ -6979,44 +7197,52 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_337", - "shape": "_338" + "data": "_328", + "shape": "_329" } } }, "output_ports": { - "_339": { + "_330": { "value": "onnx_Reshape_1", - "type": "None" + "shape": [ + 4, + 2 + ], + "type": "int64" } } }, - "Slice_344": { + "Slice_335": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_339": { - "type": "None" + "_330": { + "shape": [ + 4, + 2 + ], + "type": "int64" } }, "parameters": { - "_341": { + "_332": { "value": [ -1 ] }, - "_342": { + "_333": { "value": [ -9223372036854775807 ] }, - "_340": { + "_331": { "value": [ 0 ] }, - "_343": { + "_334": { "value": [ -1 ] @@ -7024,28 +7250,36 @@ "onnx_Slice_1": { "function": "onnx::Slice", "args": { - "data": "_339", - "starts": "_341", - "ends": "_342", - "axes": "_340", - "steps": "_343" + "data": "_330", + "starts": "_332", + "ends": "_333", + "axes": "_331", + "steps": "_334" } } }, "output_ports": { - "_344": { + "_335": { "value": "onnx_Slice_1", - "type": "None" + "shape": [ + 4, + 2 + ], + "type": "int64" } } }, - "Transpose_345": { + "Transpose_336": { "metadata": { "color": ".2 .3 .3" }, "input_ports": { - "_344": { - "type": "None" + "_335": { + "shape": [ + 4, + 2 + ], + "type": "int64" } }, "parameters": { @@ -7058,28 +7292,36 @@ "onnx_Transpose_1": { "function": "onnx::Transpose", "args": { - "data": "_344" + "data": "_335" } } }, "output_ports": { - "_345": { + "_336": { "value": "onnx_Transpose_1", - "type": "None" + "shape": [ + 2, + 4 + ], + "type": "int64" } } }, - "Reshape_347": { + "Reshape_338": { "metadata": { "color": ".4 .3 .3" }, "input_ports": { - "_345": { - "type": "None" + "_336": { + "shape": [ + 2, + 4 + ], + "type": "int64" } }, "parameters": { - "_346": { + "_337": { "value": [ -1 ] @@ -7090,22 +7332,28 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_345", - "shape": "_346" + "data": "_336", + "shape": "_337" } } }, "output_ports": { - "_347": { + "_338": { "value": "onnx_Reshape_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Cast_348": { + "Cast_339": { "input_ports": { - "_347": { - "type": "None" + "_338": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { @@ -7115,23 +7363,26 @@ "onnx_Cast_1": { "function": "onnx::Cast", "args": { - "input": "_347" + "input": "_338" } } }, "output_ports": { - "_348": { + "_339": { "value": "onnx_Cast_1", - "type": "None" + "shape": [ + 8 + ], + "type": "int64" } } }, - "Pad_350": { + "Pad_341": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_325": { + "_318": { "shape": [ 1, 92, @@ -7140,12 +7391,15 @@ ], "type": "float32" }, - "_348": { - "type": "None" + "_339": { + "shape": [ + 8 + ], + "type": "int64" } }, "parameters": { - "_349": { + "_340": { "value": 0.0 }, "mode": { @@ -7154,42 +7408,30 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_325", - "pads": "_348", - "constant_value": "_349" + "data": "_318", + "pads": "_339", + "constant_value": "_340" } } }, "output_ports": { - "_350": { + "_341": { "value": "onnx_Pad_1", - "shape": [ - 1, - 92, - 9, - 9 - ], "type": "float32" } } }, - "Pad_352": { + "Pad_343": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_350": { - "shape": [ - 1, - 92, - 9, - 9 - ], + "_341": { "type": "float32" } }, "parameters": { - "_351": { + "_342": { "value": [ 0, 0, @@ -7207,31 
+7449,28 @@ "onnx_Pad_1": { "function": "onnx::Pad", "args": { - "data": "_350", - "pads": "_351" + "data": "_341", + "pads": "_342" } } }, "output_ports": { - "_352": { + "_343": { "value": "onnx_Pad_1", - "type": "None" + "type": "float32" } } }, - "AveragePool_353": { + "AveragePool_344": { "metadata": { "color": ".2 .3 .2" }, "input_ports": { - "_352": { - "type": "None" + "_343": { + "type": "float32" } }, "parameters": { - "ceil_mode": { - "value": 0 - }, "kernel_shape": { "value": [ 2, @@ -7255,35 +7494,23 @@ "onnx_AveragePool_1": { "function": "onnx::AveragePool", "args": { - "X": "_352" + "X": "_343" } } }, "output_ports": { - "_353": { + "_344": { "value": "onnx_AveragePool_1", - "shape": [ - 1, - 92, - 8, - 8 - ], "type": "float32" } } }, - "Conv_354": { + "Conv_345": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_321": { - "shape": [ - 1, - 476, - 8, - 8 - ], + "_314": { "type": "float32" }, "conv2d27_weight": { @@ -7335,14 +7562,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_321", + "X": "_314", "W": "conv2d27_weight", "B": "conv2d27_bias" } } }, "output_ports": { - "_354": { + "_345": { "value": "onnx_Conv_1", "shape": [ 1, @@ -7354,7 +7581,7 @@ } } }, - "Unsqueeze_356": { + "Unsqueeze_347": { "metadata": { "color": ".2 .3 .3" }, @@ -7367,7 +7594,7 @@ } }, "parameters": { - "_355": { + "_346": { "value": [ 1, 2 @@ -7377,23 +7604,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu27_weight", - "axes": "_355" + "axes": "_346" } } }, "output_ports": { - "_356": { + "_347": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_357": { + "PRelu_348": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_354": { + "_345": { "shape": [ 1, 92, @@ -7402,21 +7634,26 @@ ], "type": "float32" }, - "_356": { - "type": "None" + "_347": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_354", - "slope": "_356" + "X": "_345", + "slope": "_347" } } }, "output_ports": { - "_357": { + "_348": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -7428,12 +7665,12 @@ } } }, - "Conv_358": { + "Conv_349": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_357": { + "_348": { "shape": [ 1, 92, @@ -7491,14 +7728,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_357", + "X": "_348", "W": "conv2d28_weight", "B": "conv2d28_bias" } } }, "output_ports": { - "_358": { + "_349": { "value": "onnx_Conv_1", "shape": [ 1, @@ -7510,7 +7747,7 @@ } } }, - "Unsqueeze_360": { + "Unsqueeze_351": { "metadata": { "color": ".2 .3 .3" }, @@ -7523,7 +7760,7 @@ } }, "parameters": { - "_359": { + "_350": { "value": [ 1, 2 @@ -7533,23 +7770,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu28_weight", - "axes": "_359" + "axes": "_350" } } }, "output_ports": { - "_360": { + "_351": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_361": { + "PRelu_352": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_358": { + "_349": { "shape": [ 1, 128, @@ -7558,21 +7800,26 @@ ], "type": "float32" }, - "_360": { - "type": "None" + "_351": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_358", - "slope": "_360" + "X": "_349", + "slope": "_351" } } }, "output_ports": { - "_361": { + "_352": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -7584,18 +7831,12 @@ 
} } }, - "Conv_362": { + "Conv_353": { "metadata": { "color": ".2 .3 .5" }, "input_ports": { - "_321": { - "shape": [ - 1, - 476, - 8, - 8 - ], + "_314": { "type": "float32" }, "conv2d29_weight": { @@ -7647,14 +7888,14 @@ "onnx_Conv_1": { "function": "onnx::Conv", "args": { - "X": "_321", + "X": "_314", "W": "conv2d29_weight", "B": "conv2d29_bias" } } }, "output_ports": { - "_362": { + "_353": { "value": "onnx_Conv_1", "shape": [ 1, @@ -7666,7 +7907,7 @@ } } }, - "Unsqueeze_364": { + "Unsqueeze_355": { "metadata": { "color": ".2 .3 .3" }, @@ -7679,7 +7920,7 @@ } }, "parameters": { - "_363": { + "_354": { "value": [ 1, 2 @@ -7689,23 +7930,28 @@ "function": "onnx::Unsqueeze", "args": { "data": "prelu29_weight", - "axes": "_363" + "axes": "_354" } } }, "output_ports": { - "_364": { + "_355": { "value": "onnx_Unsqueeze_1", - "type": "None" + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } } }, - "PRelu_365": { + "PRelu_356": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_362": { + "_353": { "shape": [ 1, 128, @@ -7714,21 +7960,26 @@ ], "type": "float32" }, - "_364": { - "type": "None" + "_355": { + "shape": [ + 1, + 1, + 1 + ], + "type": "float32" } }, "parameters": { "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_362", - "slope": "_364" + "X": "_353", + "slope": "_355" } } }, "output_ports": { - "_365": { + "_356": { "value": "onnx_PRelu_1", "shape": [ 1, @@ -7740,21 +7991,12 @@ } } }, - "Concat_366": { + "Concat_357": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_365": { - "shape": [ - 1, - 128, - 8, - 8 - ], - "type": "float32" - }, - "_361": { + "_356": { "shape": [ 1, 128, @@ -7763,14 +8005,17 @@ ], "type": "float32" }, - "_353": { + "_352": { "shape": [ 1, - 92, + 128, 8, 8 ], "type": "float32" + }, + "_344": { + "type": "float32" } }, "parameters": { @@ -7780,32 +8025,20 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_365', '_361', '_353']" + "inputs": "['_356', '_352', '_344']" } } }, "output_ports": { - "_366": { + "_357": { "value": "onnx_Concat_1", - "shape": [ - 1, - 348, - 8, - 8 - ], "type": "float32" } } }, - "Shape_367": { + "Shape_358": { "input_ports": { - "_366": { - "shape": [ - 1, - 348, - 8, - 8 - ], + "_357": { "type": "float32" } }, @@ -7813,38 +8046,44 @@ "onnx_Shape_1": { "function": "onnx::Shape", "args": { - "data": "_366" + "data": "_357" } } }, "output_ports": { - "_367": { + "_358": { "value": "onnx_Shape_1", - "type": "None" + "shape": [ + 4 + ], + "type": "int64" } } }, - "Slice_371": { + "Slice_362": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_367": { - "type": "None" + "_358": { + "shape": [ + 4 + ], + "type": "int64" } }, "parameters": { - "_369": { + "_360": { "value": [ 0 ] }, - "_370": { + "_361": { "value": [ 0 ] }, - "_368": { + "_359": { "value": [ 0 ] @@ -7852,31 +8091,37 @@ "onnx_Slice_1": { "function": "onnx::Slice", "args": { - "data": "_367", - "starts": "_369", - "ends": "_370", - "axes": "_368" + "data": "_358", + "starts": "_360", + "ends": "_361", + "axes": "_359" } } }, "output_ports": { - "_371": { + "_362": { "value": "onnx_Slice_1", - "type": "None" + "shape": [ + 0 + ], + "type": "int64" } } }, - "Concat_373": { + "Concat_364": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_371": { - "type": "None" + "_362": { + "shape": [ + 0 + ], + "type": "int64" } }, "parameters": { - "_372": { + "_363": { "value": [ -1 ] @@ -7887,33 +8132,33 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_371', '_372']" + "inputs": 
"['_362', '_363']" } } }, "output_ports": { - "_373": { + "_364": { "value": "onnx_Concat_1", - "type": "None" + "shape": [ + 1 + ], + "type": "int64" } } }, - "Reshape_374": { + "Reshape_365": { "metadata": { "color": ".4 .3 .3" }, "input_ports": { - "_366": { - "shape": [ - 1, - 348, - 8, - 8 - ], + "_357": { "type": "float32" }, - "_373": { - "type": "None" + "_364": { + "shape": [ + 1 + ], + "type": "int64" } }, "parameters": { @@ -7923,30 +8168,24 @@ "onnx_Reshape_1": { "function": "onnx::Reshape", "args": { - "data": "_366", - "shape": "_373" + "data": "_357", + "shape": "_364" } } }, "output_ports": { - "_374": { + "_365": { "value": "onnx_Reshape_1", - "shape": [ - 22272 - ], "type": "float32" } } }, - "Concat_375": { + "Concat_366": { "metadata": { "color": ".3 .3 .2" }, "input_ports": { - "_374": { - "shape": [ - 22272 - ], + "_365": { "type": "float32" }, "input2": { @@ -7963,21 +8202,18 @@ "onnx_Concat_1": { "function": "onnx::Concat", "args": { - "inputs": "['_374', 'input2']" + "inputs": "['_365', 'input2']" } } }, "output_ports": { - "_375": { + "_366": { "value": "onnx_Concat_1", - "shape": [ - 22273 - ], "type": "float32" } } }, - "Transpose_376": { + "Transpose_367": { "metadata": { "color": ".2 .3 .3" }, @@ -8005,41 +8241,49 @@ } }, "output_ports": { - "_376": { + "_367": { "value": "onnx_Transpose_1", - "type": "None" + "shape": [ + 22273, + 1096 + ], + "type": "float32" } } }, - "MatMul_377": { + "MatMul_368": { "input_ports": { - "_375": { + "_366": { + "type": "float32" + }, + "_367": { "shape": [ - 22273 + 22273, + 1096 ], "type": "float32" - }, - "_376": { - "type": "None" } }, "parameters": { "onnx_MatMul_1": { "function": "onnx::MatMul", "args": { - "A": "_375", - "B": "_376" + "A": "_366", + "B": "_367" } } }, "output_ports": { - "_377": { + "_368": { "value": "onnx_MatMul_1", - "type": "None" + "shape": [ + 1096 + ], + "type": "float32" } } }, - "Add_378": { + "Add_369": { "input_ports": { "dense_bias": { "shape": [ @@ -8047,8 +8291,11 @@ ], "type": "float32" }, - "_377": { - "type": "None" + "_368": { + "shape": [ + 1096 + ], + "type": "float32" } }, "parameters": { @@ -8056,12 +8303,12 @@ "function": "onnx::Add", "args": { "A": "dense_bias", - "B": "_377" + "B": "_368" } } }, "output_ports": { - "_378": { + "_369": { "value": "onnx_Add_1", "shape": [ 1096 @@ -8070,12 +8317,12 @@ } } }, - "PRelu_379": { + "PRelu_370": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_378": { + "_369": { "shape": [ 1096 ], @@ -8092,13 +8339,13 @@ "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_378", + "X": "_369", "slope": "prelu30_weight" } } }, "output_ports": { - "_379": { + "_370": { "value": "onnx_PRelu_1", "shape": [ 1096 @@ -8107,7 +8354,7 @@ } } }, - "Transpose_380": { + "Transpose_371": { "metadata": { "color": ".2 .3 .3" }, @@ -8135,41 +8382,52 @@ } }, "output_ports": { - "_380": { + "_371": { "value": "onnx_Transpose_1", - "type": "None" + "shape": [ + 1096, + 1096 + ], + "type": "float32" } } }, - "MatMul_381": { + "MatMul_372": { "input_ports": { - "_379": { + "_370": { "shape": [ 1096 ], "type": "float32" }, - "_380": { - "type": "None" + "_371": { + "shape": [ + 1096, + 1096 + ], + "type": "float32" } }, "parameters": { "onnx_MatMul_1": { "function": "onnx::MatMul", "args": { - "A": "_379", - "B": "_380" + "A": "_370", + "B": "_371" } } }, "output_ports": { - "_381": { + "_372": { "value": "onnx_MatMul_1", - "type": "None" + "shape": [ + 1096 + ], + "type": "float32" } } }, - "Add_382": { + "Add_373": { "input_ports": { "dense2_bias": { 
"shape": [ @@ -8177,8 +8435,11 @@ ], "type": "float32" }, - "_381": { - "type": "None" + "_372": { + "shape": [ + 1096 + ], + "type": "float32" } }, "parameters": { @@ -8186,12 +8447,12 @@ "function": "onnx::Add", "args": { "A": "dense2_bias", - "B": "_381" + "B": "_372" } } }, "output_ports": { - "_382": { + "_373": { "value": "onnx_Add_1", "shape": [ 1096 @@ -8200,12 +8461,12 @@ } } }, - "PRelu_383": { + "PRelu_374": { "metadata": { "color": ".4 .2 .1" }, "input_ports": { - "_382": { + "_373": { "shape": [ 1096 ], @@ -8222,13 +8483,13 @@ "onnx_PRelu_1": { "function": "onnx::PRelu", "args": { - "X": "_382", + "X": "_373", "slope": "prelu31_weight" } } }, "output_ports": { - "_383": { + "_374": { "value": "onnx_PRelu_1", "shape": [ 1096 @@ -8237,7 +8498,7 @@ } } }, - "Transpose_384": { + "Transpose_375": { "metadata": { "color": ".2 .3 .3" }, @@ -8265,41 +8526,52 @@ } }, "output_ports": { - "_384": { + "_375": { "value": "onnx_Transpose_1", - "type": "None" + "shape": [ + 1096, + 180 + ], + "type": "float32" } } }, - "MatMul_385": { + "MatMul_376": { "input_ports": { - "_383": { + "_374": { "shape": [ 1096 ], "type": "float32" }, - "_384": { - "type": "None" + "_375": { + "shape": [ + 1096, + 180 + ], + "type": "float32" } }, "parameters": { "onnx_MatMul_1": { "function": "onnx::MatMul", "args": { - "A": "_383", - "B": "_384" + "A": "_374", + "B": "_375" } } }, "output_ports": { - "_385": { + "_376": { "value": "onnx_MatMul_1", - "type": "None" + "shape": [ + 180 + ], + "type": "float32" } } }, - "Add_386": { + "Add_377": { "input_ports": { "dense3_bias": { "shape": [ @@ -8307,8 +8579,11 @@ ], "type": "float32" }, - "_385": { - "type": "None" + "_376": { + "shape": [ + 180 + ], + "type": "float32" } }, "parameters": { @@ -8316,12 +8591,12 @@ "function": "onnx::Add", "args": { "A": "dense3_bias", - "B": "_385" + "B": "_376" } } }, "output_ports": { - "_386": { + "_377": { "value": "onnx_Add_1", "shape": [ 180 @@ -8368,15 +8643,15 @@ "sender_port": "_103", "receiver_port": "_103" }, - "AveragePool_103_Conv_144": { + "AveragePool_103_Conv_143": { "sender": "AveragePool_103", - "receiver": "Conv_144", + "receiver": "Conv_143", "sender_port": "_103", "receiver_port": "_103" }, - "AveragePool_103_Conv_148": { + "AveragePool_103_Conv_147": { "sender": "AveragePool_103", - "receiver": "Conv_148", + "receiver": "Conv_147", "sender_port": "_103", "receiver_port": "_103" }, @@ -8410,9 +8685,9 @@ "sender_port": "_110", "receiver_port": "_110" }, - "PRelu_111_Concat_156": { + "PRelu_111_Concat_155": { "sender": "PRelu_111", - "receiver": "Concat_156", + "receiver": "Concat_155", "sender_port": "_111", "receiver_port": "_111" }, @@ -8428,375 +8703,363 @@ "sender_port": "_114", "receiver_port": "_114" }, - "PRelu_115_Pad_140": { + "PRelu_115_Pad_139": { "sender": "PRelu_115", - "receiver": "Pad_140", + "receiver": "Pad_139", "sender_port": "_115", "receiver_port": "_115" }, - "ConcatFromSequence_117_Shape_119": { - "sender": "ConcatFromSequence_117", - "receiver": "Shape_119", - "sender_port": "_117", - "receiver_port": "_117" - }, - "ConcatFromSequence_117_Cast_125": { - "sender": "ConcatFromSequence_117", - "receiver": "Cast_125", - "sender_port": "_117", - "receiver_port": "_117" - }, - "Shape_119_Gather_120": { - "sender": "Shape_119", - "receiver": "Gather_120", + "Shape_118_Gather_119": { + "sender": "Shape_118", + "receiver": "Gather_119", + "sender_port": "_118", + "receiver_port": "_118" + }, + "Gather_119_Sub_123": { + "sender": "Gather_119", + "receiver": "Sub_123", "sender_port": "_119", 
"receiver_port": "_119" }, - "Gather_120_Sub_124": { - "sender": "Gather_120", - "receiver": "Sub_124", - "sender_port": "_120", - "receiver_port": "_120" + "Mul_122_Sub_123": { + "sender": "Mul_122", + "receiver": "Sub_123", + "sender_port": "_122", + "receiver_port": "_122" }, - "Mul_123_Sub_124": { - "sender": "Mul_123", - "receiver": "Sub_124", + "Sub_123_ConstantOfShape_125": { + "sender": "Sub_123", + "receiver": "ConstantOfShape_125", "sender_port": "_123", "receiver_port": "_123" }, - "Sub_124_ConstantOfShape_126": { - "sender": "Sub_124", - "receiver": "ConstantOfShape_126", + "Cast_124_Concat_126": { + "sender": "Cast_124", + "receiver": "Concat_126", "sender_port": "_124", "receiver_port": "_124" }, - "Cast_125_Concat_127": { - "sender": "Cast_125", - "receiver": "Concat_127", + "ConstantOfShape_125_Concat_126": { + "sender": "ConstantOfShape_125", + "receiver": "Concat_126", "sender_port": "_125", "receiver_port": "_125" }, - "ConstantOfShape_126_Concat_127": { - "sender": "ConstantOfShape_126", - "receiver": "Concat_127", + "Concat_126_Reshape_128": { + "sender": "Concat_126", + "receiver": "Reshape_128", "sender_port": "_126", "receiver_port": "_126" }, - "Concat_127_Reshape_129": { - "sender": "Concat_127", - "receiver": "Reshape_129", - "sender_port": "_127", - "receiver_port": "_127" - }, - "Reshape_129_Slice_134": { - "sender": "Reshape_129", - "receiver": "Slice_134", - "sender_port": "_129", - "receiver_port": "_129" - }, - "Slice_134_Transpose_135": { - "sender": "Slice_134", - "receiver": "Transpose_135", + "Reshape_128_Slice_133": { + "sender": "Reshape_128", + "receiver": "Slice_133", + "sender_port": "_128", + "receiver_port": "_128" + }, + "Slice_133_Transpose_134": { + "sender": "Slice_133", + "receiver": "Transpose_134", + "sender_port": "_133", + "receiver_port": "_133" + }, + "Transpose_134_Reshape_136": { + "sender": "Transpose_134", + "receiver": "Reshape_136", "sender_port": "_134", "receiver_port": "_134" }, - "Transpose_135_Reshape_137": { - "sender": "Transpose_135", - "receiver": "Reshape_137", - "sender_port": "_135", - "receiver_port": "_135" + "Reshape_136_Cast_137": { + "sender": "Reshape_136", + "receiver": "Cast_137", + "sender_port": "_136", + "receiver_port": "_136" }, - "Reshape_137_Cast_138": { - "sender": "Reshape_137", - "receiver": "Cast_138", + "Cast_137_Pad_139": { + "sender": "Cast_137", + "receiver": "Pad_139", "sender_port": "_137", "receiver_port": "_137" }, - "Cast_138_Pad_140": { - "sender": "Cast_138", - "receiver": "Pad_140", - "sender_port": "_138", - "receiver_port": "_138" - }, - "Pad_140_Pad_142": { - "sender": "Pad_140", - "receiver": "Pad_142", - "sender_port": "_140", - "receiver_port": "_140" - }, - "Pad_142_AveragePool_143": { - "sender": "Pad_142", - "receiver": "AveragePool_143", + "Pad_139_Pad_141": { + "sender": "Pad_139", + "receiver": "Pad_141", + "sender_port": "_139", + "receiver_port": "_139" + }, + "Pad_141_AveragePool_142": { + "sender": "Pad_141", + "receiver": "AveragePool_142", + "sender_port": "_141", + "receiver_port": "_141" + }, + "AveragePool_142_Concat_155": { + "sender": "AveragePool_142", + "receiver": "Concat_155", "sender_port": "_142", "receiver_port": "_142" }, - "AveragePool_143_Concat_156": { - "sender": "AveragePool_143", - "receiver": "Concat_156", + "Conv_143_PRelu_146": { + "sender": "Conv_143", + "receiver": "PRelu_146", "sender_port": "_143", "receiver_port": "_143" }, - "Conv_144_PRelu_147": { - "sender": "Conv_144", - "receiver": "PRelu_147", - "sender_port": "_144", - "receiver_port": 
"_144" + "Unsqueeze_145_PRelu_146": { + "sender": "Unsqueeze_145", + "receiver": "PRelu_146", + "sender_port": "_145", + "receiver_port": "_145" }, - "Unsqueeze_146_PRelu_147": { - "sender": "Unsqueeze_146", - "receiver": "PRelu_147", + "PRelu_146_Concat_155": { + "sender": "PRelu_146", + "receiver": "Concat_155", "sender_port": "_146", "receiver_port": "_146" }, - "PRelu_147_Concat_156": { - "sender": "PRelu_147", - "receiver": "Concat_156", + "Conv_147_PRelu_150": { + "sender": "Conv_147", + "receiver": "PRelu_150", "sender_port": "_147", "receiver_port": "_147" }, - "Conv_148_PRelu_151": { - "sender": "Conv_148", - "receiver": "PRelu_151", - "sender_port": "_148", - "receiver_port": "_148" + "Unsqueeze_149_PRelu_150": { + "sender": "Unsqueeze_149", + "receiver": "PRelu_150", + "sender_port": "_149", + "receiver_port": "_149" }, - "Unsqueeze_150_PRelu_151": { - "sender": "Unsqueeze_150", - "receiver": "PRelu_151", + "PRelu_150_Conv_151": { + "sender": "PRelu_150", + "receiver": "Conv_151", "sender_port": "_150", "receiver_port": "_150" }, - "PRelu_151_Conv_152": { - "sender": "PRelu_151", - "receiver": "Conv_152", + "Conv_151_PRelu_154": { + "sender": "Conv_151", + "receiver": "PRelu_154", "sender_port": "_151", "receiver_port": "_151" }, - "Conv_152_PRelu_155": { - "sender": "Conv_152", - "receiver": "PRelu_155", - "sender_port": "_152", - "receiver_port": "_152" + "Unsqueeze_153_PRelu_154": { + "sender": "Unsqueeze_153", + "receiver": "PRelu_154", + "sender_port": "_153", + "receiver_port": "_153" }, - "Unsqueeze_154_PRelu_155": { - "sender": "Unsqueeze_154", - "receiver": "PRelu_155", + "PRelu_154_Concat_155": { + "sender": "PRelu_154", + "receiver": "Concat_155", "sender_port": "_154", "receiver_port": "_154" }, - "PRelu_155_Concat_156": { - "sender": "PRelu_155", - "receiver": "Concat_156", + "Concat_155_Conv_156": { + "sender": "Concat_155", + "receiver": "Conv_156", "sender_port": "_155", "receiver_port": "_155" }, - "Concat_156_Conv_157": { - "sender": "Concat_156", + "Concat_155_Conv_157": { + "sender": "Concat_155", "receiver": "Conv_157", - "sender_port": "_156", - "receiver_port": "_156" + "sender_port": "_155", + "receiver_port": "_155" }, - "Concat_156_Conv_158": { - "sender": "Concat_156", + "Concat_155_Conv_158": { + "sender": "Concat_155", "receiver": "Conv_158", - "sender_port": "_156", - "receiver_port": "_156" + "sender_port": "_155", + "receiver_port": "_155" }, - "Concat_156_Conv_159": { - "sender": "Concat_156", - "receiver": "Conv_159", - "sender_port": "_156", - "receiver_port": "_156" + "Concat_155_Conv_195": { + "sender": "Concat_155", + "receiver": "Conv_195", + "sender_port": "_155", + "receiver_port": "_155" }, - "Concat_156_Conv_198": { - "sender": "Concat_156", - "receiver": "Conv_198", + "Conv_156_PRelu_168": { + "sender": "Conv_156", + "receiver": "PRelu_168", "sender_port": "_156", "receiver_port": "_156" }, - "Conv_157_PRelu_169": { + "Conv_157_PRelu_205": { "sender": "Conv_157", - "receiver": "PRelu_169", + "receiver": "PRelu_205", "sender_port": "_157", "receiver_port": "_157" }, - "Conv_158_PRelu_208": { + "Conv_158_PRelu_161": { "sender": "Conv_158", - "receiver": "PRelu_208", + "receiver": "PRelu_161", "sender_port": "_158", "receiver_port": "_158" }, - "Conv_159_PRelu_162": { - "sender": "Conv_159", - "receiver": "PRelu_162", - "sender_port": "_159", - "receiver_port": "_159" + "Unsqueeze_160_PRelu_161": { + "sender": "Unsqueeze_160", + "receiver": "PRelu_161", + "sender_port": "_160", + "receiver_port": "_160" }, - "Unsqueeze_161_PRelu_162": { - 
"sender": "Unsqueeze_161", - "receiver": "PRelu_162", + "PRelu_161_Conv_162": { + "sender": "PRelu_161", + "receiver": "Conv_162", "sender_port": "_161", "receiver_port": "_161" }, - "PRelu_162_Conv_163": { - "sender": "PRelu_162", - "receiver": "Conv_163", + "Conv_162_PRelu_165": { + "sender": "Conv_162", + "receiver": "PRelu_165", "sender_port": "_162", "receiver_port": "_162" }, - "Conv_163_PRelu_166": { - "sender": "Conv_163", - "receiver": "PRelu_166", - "sender_port": "_163", - "receiver_port": "_163" + "Unsqueeze_164_PRelu_165": { + "sender": "Unsqueeze_164", + "receiver": "PRelu_165", + "sender_port": "_164", + "receiver_port": "_164" }, - "Unsqueeze_165_PRelu_166": { - "sender": "Unsqueeze_165", - "receiver": "PRelu_166", + "PRelu_165_Concat_206": { + "sender": "PRelu_165", + "receiver": "Concat_206", "sender_port": "_165", "receiver_port": "_165" }, - "PRelu_166_Concat_209": { - "sender": "PRelu_166", - "receiver": "Concat_209", - "sender_port": "_166", - "receiver_port": "_166" + "Unsqueeze_167_PRelu_168": { + "sender": "Unsqueeze_167", + "receiver": "PRelu_168", + "sender_port": "_167", + "receiver_port": "_167" }, - "Unsqueeze_168_PRelu_169": { - "sender": "Unsqueeze_168", - "receiver": "PRelu_169", + "PRelu_168_Pad_191": { + "sender": "PRelu_168", + "receiver": "Pad_191", "sender_port": "_168", "receiver_port": "_168" }, - "PRelu_169_Pad_194": { - "sender": "PRelu_169", - "receiver": "Pad_194", - "sender_port": "_169", - "receiver_port": "_169" - }, - "ConcatFromSequence_171_Shape_173": { - "sender": "ConcatFromSequence_171", - "receiver": "Shape_173", - "sender_port": "_171", - "receiver_port": "_171" + "Shape_170_Gather_171": { + "sender": "Shape_170", + "receiver": "Gather_171", + "sender_port": "_170", + "receiver_port": "_170" }, - "ConcatFromSequence_171_Cast_179": { - "sender": "ConcatFromSequence_171", - "receiver": "Cast_179", + "Gather_171_Sub_175": { + "sender": "Gather_171", + "receiver": "Sub_175", "sender_port": "_171", "receiver_port": "_171" }, - "Shape_173_Gather_174": { - "sender": "Shape_173", - "receiver": "Gather_174", - "sender_port": "_173", - "receiver_port": "_173" - }, - "Gather_174_Sub_178": { - "sender": "Gather_174", - "receiver": "Sub_178", + "Mul_174_Sub_175": { + "sender": "Mul_174", + "receiver": "Sub_175", "sender_port": "_174", "receiver_port": "_174" }, - "Mul_177_Sub_178": { - "sender": "Mul_177", - "receiver": "Sub_178", + "Sub_175_ConstantOfShape_177": { + "sender": "Sub_175", + "receiver": "ConstantOfShape_177", + "sender_port": "_175", + "receiver_port": "_175" + }, + "Cast_176_Concat_178": { + "sender": "Cast_176", + "receiver": "Concat_178", + "sender_port": "_176", + "receiver_port": "_176" + }, + "ConstantOfShape_177_Concat_178": { + "sender": "ConstantOfShape_177", + "receiver": "Concat_178", "sender_port": "_177", "receiver_port": "_177" }, - "Sub_178_ConstantOfShape_180": { - "sender": "Sub_178", - "receiver": "ConstantOfShape_180", + "Concat_178_Reshape_180": { + "sender": "Concat_178", + "receiver": "Reshape_180", "sender_port": "_178", "receiver_port": "_178" }, - "Cast_179_Concat_181": { - "sender": "Cast_179", - "receiver": "Concat_181", - "sender_port": "_179", - "receiver_port": "_179" - }, - "ConstantOfShape_180_Concat_181": { - "sender": "ConstantOfShape_180", - "receiver": "Concat_181", + "Reshape_180_Slice_185": { + "sender": "Reshape_180", + "receiver": "Slice_185", "sender_port": "_180", "receiver_port": "_180" }, - "Concat_181_Reshape_183": { - "sender": "Concat_181", - "receiver": "Reshape_183", - "sender_port": 
"_181", - "receiver_port": "_181" - }, - "Reshape_183_Slice_188": { - "sender": "Reshape_183", - "receiver": "Slice_188", - "sender_port": "_183", - "receiver_port": "_183" - }, - "Slice_188_Transpose_189": { - "sender": "Slice_188", - "receiver": "Transpose_189", + "Slice_185_Transpose_186": { + "sender": "Slice_185", + "receiver": "Transpose_186", + "sender_port": "_185", + "receiver_port": "_185" + }, + "Transpose_186_Reshape_188": { + "sender": "Transpose_186", + "receiver": "Reshape_188", + "sender_port": "_186", + "receiver_port": "_186" + }, + "Reshape_188_Cast_189": { + "sender": "Reshape_188", + "receiver": "Cast_189", "sender_port": "_188", "receiver_port": "_188" }, - "Transpose_189_Reshape_191": { - "sender": "Transpose_189", - "receiver": "Reshape_191", + "Cast_189_Pad_191": { + "sender": "Cast_189", + "receiver": "Pad_191", "sender_port": "_189", "receiver_port": "_189" }, - "Reshape_191_Cast_192": { - "sender": "Reshape_191", - "receiver": "Cast_192", + "Pad_191_Pad_193": { + "sender": "Pad_191", + "receiver": "Pad_193", "sender_port": "_191", "receiver_port": "_191" }, - "Cast_192_Pad_194": { - "sender": "Cast_192", - "receiver": "Pad_194", - "sender_port": "_192", - "receiver_port": "_192" + "Pad_193_AveragePool_194": { + "sender": "Pad_193", + "receiver": "AveragePool_194", + "sender_port": "_193", + "receiver_port": "_193" }, - "Pad_194_Pad_196": { - "sender": "Pad_194", - "receiver": "Pad_196", + "AveragePool_194_Concat_206": { + "sender": "AveragePool_194", + "receiver": "Concat_206", "sender_port": "_194", "receiver_port": "_194" }, - "Pad_196_AveragePool_197": { - "sender": "Pad_196", - "receiver": "AveragePool_197", - "sender_port": "_196", - "receiver_port": "_196" + "Conv_195_PRelu_198": { + "sender": "Conv_195", + "receiver": "PRelu_198", + "sender_port": "_195", + "receiver_port": "_195" }, - "AveragePool_197_Concat_209": { - "sender": "AveragePool_197", - "receiver": "Concat_209", + "Unsqueeze_197_PRelu_198": { + "sender": "Unsqueeze_197", + "receiver": "PRelu_198", "sender_port": "_197", "receiver_port": "_197" }, - "Conv_198_PRelu_201": { - "sender": "Conv_198", - "receiver": "PRelu_201", + "PRelu_198_Conv_199": { + "sender": "PRelu_198", + "receiver": "Conv_199", "sender_port": "_198", "receiver_port": "_198" }, - "Unsqueeze_200_PRelu_201": { - "sender": "Unsqueeze_200", - "receiver": "PRelu_201", - "sender_port": "_200", - "receiver_port": "_200" + "Conv_199_PRelu_202": { + "sender": "Conv_199", + "receiver": "PRelu_202", + "sender_port": "_199", + "receiver_port": "_199" }, - "PRelu_201_Conv_202": { - "sender": "PRelu_201", - "receiver": "Conv_202", + "Unsqueeze_201_PRelu_202": { + "sender": "Unsqueeze_201", + "receiver": "PRelu_202", "sender_port": "_201", "receiver_port": "_201" }, - "Conv_202_PRelu_205": { - "sender": "Conv_202", - "receiver": "PRelu_205", + "PRelu_202_Concat_206": { + "sender": "PRelu_202", + "receiver": "Concat_206", "sender_port": "_202", "receiver_port": "_202" }, @@ -8806,252 +9069,270 @@ "sender_port": "_204", "receiver_port": "_204" }, - "PRelu_205_Concat_209": { + "PRelu_205_Concat_206": { "sender": "PRelu_205", - "receiver": "Concat_209", + "receiver": "Concat_206", "sender_port": "_205", "receiver_port": "_205" }, - "Unsqueeze_207_PRelu_208": { - "sender": "Unsqueeze_207", - "receiver": "PRelu_208", - "sender_port": "_207", - "receiver_port": "_207" + "Concat_206_Pad_208": { + "sender": "Concat_206", + "receiver": "Pad_208", + "sender_port": "_206", + "receiver_port": "_206" }, - "PRelu_208_Concat_209": { - "sender": 
"PRelu_208", - "receiver": "Concat_209", + "Pad_208_AveragePool_209": { + "sender": "Pad_208", + "receiver": "AveragePool_209", "sender_port": "_208", "receiver_port": "_208" }, - "Concat_209_Pad_211": { - "sender": "Concat_209", - "receiver": "Pad_211", + "AveragePool_209_Conv_210": { + "sender": "AveragePool_209", + "receiver": "Conv_210", "sender_port": "_209", "receiver_port": "_209" }, - "Pad_211_AveragePool_212": { - "sender": "Pad_211", - "receiver": "AveragePool_212", - "sender_port": "_211", - "receiver_port": "_211" + "AveragePool_209_Conv_218": { + "sender": "AveragePool_209", + "receiver": "Conv_218", + "sender_port": "_209", + "receiver_port": "_209" }, - "AveragePool_212_Conv_213": { - "sender": "AveragePool_212", - "receiver": "Conv_213", - "sender_port": "_212", - "receiver_port": "_212" + "AveragePool_209_Conv_222": { + "sender": "AveragePool_209", + "receiver": "Conv_222", + "sender_port": "_209", + "receiver_port": "_209" }, - "AveragePool_212_Conv_221": { - "sender": "AveragePool_212", - "receiver": "Conv_221", - "sender_port": "_212", - "receiver_port": "_212" + "AveragePool_209_Conv_252": { + "sender": "AveragePool_209", + "receiver": "Conv_252", + "sender_port": "_209", + "receiver_port": "_209" }, - "AveragePool_212_Conv_225": { - "sender": "AveragePool_212", - "receiver": "Conv_225", - "sender_port": "_212", - "receiver_port": "_212" + "Conv_210_PRelu_213": { + "sender": "Conv_210", + "receiver": "PRelu_213", + "sender_port": "_210", + "receiver_port": "_210" }, - "AveragePool_212_Conv_257": { - "sender": "AveragePool_212", - "receiver": "Conv_257", + "Unsqueeze_212_PRelu_213": { + "sender": "Unsqueeze_212", + "receiver": "PRelu_213", "sender_port": "_212", "receiver_port": "_212" }, - "Conv_213_PRelu_216": { - "sender": "Conv_213", - "receiver": "PRelu_216", + "PRelu_213_Conv_214": { + "sender": "PRelu_213", + "receiver": "Conv_214", "sender_port": "_213", "receiver_port": "_213" }, - "Unsqueeze_215_PRelu_216": { - "sender": "Unsqueeze_215", - "receiver": "PRelu_216", - "sender_port": "_215", - "receiver_port": "_215" + "Conv_214_PRelu_217": { + "sender": "Conv_214", + "receiver": "PRelu_217", + "sender_port": "_214", + "receiver_port": "_214" }, - "PRelu_216_Conv_217": { - "sender": "PRelu_216", - "receiver": "Conv_217", + "Unsqueeze_216_PRelu_217": { + "sender": "Unsqueeze_216", + "receiver": "PRelu_217", "sender_port": "_216", "receiver_port": "_216" }, - "Conv_217_PRelu_220": { - "sender": "Conv_217", - "receiver": "PRelu_220", + "PRelu_217_Concat_260": { + "sender": "PRelu_217", + "receiver": "Concat_260", "sender_port": "_217", "receiver_port": "_217" }, - "Unsqueeze_219_PRelu_220": { - "sender": "Unsqueeze_219", - "receiver": "PRelu_220", - "sender_port": "_219", - "receiver_port": "_219" + "Conv_218_PRelu_221": { + "sender": "Conv_218", + "receiver": "PRelu_221", + "sender_port": "_218", + "receiver_port": "_218" }, - "PRelu_220_Concat_265": { - "sender": "PRelu_220", - "receiver": "Concat_265", + "Unsqueeze_220_PRelu_221": { + "sender": "Unsqueeze_220", + "receiver": "PRelu_221", "sender_port": "_220", "receiver_port": "_220" }, - "Conv_221_PRelu_224": { - "sender": "Conv_221", - "receiver": "PRelu_224", + "PRelu_221_Concat_260": { + "sender": "PRelu_221", + "receiver": "Concat_260", "sender_port": "_221", "receiver_port": "_221" }, - "Unsqueeze_223_PRelu_224": { - "sender": "Unsqueeze_223", - "receiver": "PRelu_224", - "sender_port": "_223", - "receiver_port": "_223" + "Conv_222_PRelu_225": { + "sender": "Conv_222", + "receiver": "PRelu_225", + 
"sender_port": "_222", + "receiver_port": "_222" }, - "PRelu_224_Concat_265": { - "sender": "PRelu_224", - "receiver": "Concat_265", + "Unsqueeze_224_PRelu_225": { + "sender": "Unsqueeze_224", + "receiver": "PRelu_225", "sender_port": "_224", "receiver_port": "_224" }, - "Conv_225_PRelu_228": { - "sender": "Conv_225", - "receiver": "PRelu_228", + "PRelu_225_Pad_248": { + "sender": "PRelu_225", + "receiver": "Pad_248", "sender_port": "_225", "receiver_port": "_225" }, - "Unsqueeze_227_PRelu_228": { - "sender": "Unsqueeze_227", - "receiver": "PRelu_228", + "Shape_227_Gather_228": { + "sender": "Shape_227", + "receiver": "Gather_228", "sender_port": "_227", "receiver_port": "_227" }, - "PRelu_228_Pad_253": { - "sender": "PRelu_228", - "receiver": "Pad_253", + "Gather_228_Sub_232": { + "sender": "Gather_228", + "receiver": "Sub_232", "sender_port": "_228", "receiver_port": "_228" }, - "ConcatFromSequence_230_Shape_232": { - "sender": "ConcatFromSequence_230", - "receiver": "Shape_232", - "sender_port": "_230", - "receiver_port": "_230" - }, - "ConcatFromSequence_230_Cast_238": { - "sender": "ConcatFromSequence_230", - "receiver": "Cast_238", - "sender_port": "_230", - "receiver_port": "_230" - }, - "Shape_232_Gather_233": { - "sender": "Shape_232", - "receiver": "Gather_233", + "Mul_231_Sub_232": { + "sender": "Mul_231", + "receiver": "Sub_232", + "sender_port": "_231", + "receiver_port": "_231" + }, + "Sub_232_ConstantOfShape_234": { + "sender": "Sub_232", + "receiver": "ConstantOfShape_234", "sender_port": "_232", "receiver_port": "_232" }, - "Gather_233_Sub_237": { - "sender": "Gather_233", - "receiver": "Sub_237", + "Cast_233_Concat_235": { + "sender": "Cast_233", + "receiver": "Concat_235", "sender_port": "_233", "receiver_port": "_233" }, - "Mul_236_Sub_237": { - "sender": "Mul_236", - "receiver": "Sub_237", - "sender_port": "_236", - "receiver_port": "_236" - }, - "Sub_237_ConstantOfShape_239": { - "sender": "Sub_237", - "receiver": "ConstantOfShape_239", + "ConstantOfShape_234_Concat_235": { + "sender": "ConstantOfShape_234", + "receiver": "Concat_235", + "sender_port": "_234", + "receiver_port": "_234" + }, + "Concat_235_Reshape_237": { + "sender": "Concat_235", + "receiver": "Reshape_237", + "sender_port": "_235", + "receiver_port": "_235" + }, + "Reshape_237_Slice_242": { + "sender": "Reshape_237", + "receiver": "Slice_242", "sender_port": "_237", "receiver_port": "_237" }, - "Cast_238_Concat_240": { - "sender": "Cast_238", - "receiver": "Concat_240", - "sender_port": "_238", - "receiver_port": "_238" - }, - "ConstantOfShape_239_Concat_240": { - "sender": "ConstantOfShape_239", - "receiver": "Concat_240", - "sender_port": "_239", - "receiver_port": "_239" - }, - "Concat_240_Reshape_242": { - "sender": "Concat_240", - "receiver": "Reshape_242", - "sender_port": "_240", - "receiver_port": "_240" - }, - "Reshape_242_Slice_247": { - "sender": "Reshape_242", - "receiver": "Slice_247", + "Slice_242_Transpose_243": { + "sender": "Slice_242", + "receiver": "Transpose_243", "sender_port": "_242", "receiver_port": "_242" }, - "Slice_247_Transpose_248": { - "sender": "Slice_247", - "receiver": "Transpose_248", - "sender_port": "_247", - "receiver_port": "_247" - }, - "Transpose_248_Reshape_250": { - "sender": "Transpose_248", - "receiver": "Reshape_250", + "Transpose_243_Reshape_245": { + "sender": "Transpose_243", + "receiver": "Reshape_245", + "sender_port": "_243", + "receiver_port": "_243" + }, + "Reshape_245_Cast_246": { + "sender": "Reshape_245", + "receiver": "Cast_246", + 
"sender_port": "_245", + "receiver_port": "_245" + }, + "Cast_246_Pad_248": { + "sender": "Cast_246", + "receiver": "Pad_248", + "sender_port": "_246", + "receiver_port": "_246" + }, + "Pad_248_Pad_250": { + "sender": "Pad_248", + "receiver": "Pad_250", "sender_port": "_248", "receiver_port": "_248" }, - "Reshape_250_Cast_251": { - "sender": "Reshape_250", - "receiver": "Cast_251", + "Pad_250_AveragePool_251": { + "sender": "Pad_250", + "receiver": "AveragePool_251", "sender_port": "_250", "receiver_port": "_250" }, - "Cast_251_Pad_253": { - "sender": "Cast_251", - "receiver": "Pad_253", + "AveragePool_251_Concat_260": { + "sender": "AveragePool_251", + "receiver": "Concat_260", "sender_port": "_251", "receiver_port": "_251" }, - "Pad_253_Pad_255": { - "sender": "Pad_253", - "receiver": "Pad_255", - "sender_port": "_253", - "receiver_port": "_253" - }, - "Pad_255_AveragePool_256": { - "sender": "Pad_255", - "receiver": "AveragePool_256", + "Conv_252_PRelu_255": { + "sender": "Conv_252", + "receiver": "PRelu_255", + "sender_port": "_252", + "receiver_port": "_252" + }, + "Unsqueeze_254_PRelu_255": { + "sender": "Unsqueeze_254", + "receiver": "PRelu_255", + "sender_port": "_254", + "receiver_port": "_254" + }, + "PRelu_255_Conv_256": { + "sender": "PRelu_255", + "receiver": "Conv_256", "sender_port": "_255", "receiver_port": "_255" }, - "AveragePool_256_Concat_265": { - "sender": "AveragePool_256", - "receiver": "Concat_265", + "Conv_256_PRelu_259": { + "sender": "Conv_256", + "receiver": "PRelu_259", "sender_port": "_256", "receiver_port": "_256" }, - "Conv_257_PRelu_260": { - "sender": "Conv_257", - "receiver": "PRelu_260", - "sender_port": "_257", - "receiver_port": "_257" + "Unsqueeze_258_PRelu_259": { + "sender": "Unsqueeze_258", + "receiver": "PRelu_259", + "sender_port": "_258", + "receiver_port": "_258" }, - "Unsqueeze_259_PRelu_260": { - "sender": "Unsqueeze_259", - "receiver": "PRelu_260", + "PRelu_259_Concat_260": { + "sender": "PRelu_259", + "receiver": "Concat_260", "sender_port": "_259", "receiver_port": "_259" }, - "PRelu_260_Conv_261": { - "sender": "PRelu_260", + "Concat_260_Conv_261": { + "sender": "Concat_260", "receiver": "Conv_261", "sender_port": "_260", "receiver_port": "_260" }, + "Concat_260_Conv_269": { + "sender": "Concat_260", + "receiver": "Conv_269", + "sender_port": "_260", + "receiver_port": "_260" + }, + "Concat_260_Conv_299": { + "sender": "Concat_260", + "receiver": "Conv_299", + "sender_port": "_260", + "receiver_port": "_260" + }, + "Concat_260_Conv_307": { + "sender": "Concat_260", + "receiver": "Conv_307", + "sender_port": "_260", + "receiver_port": "_260" + }, "Conv_261_PRelu_264": { "sender": "Conv_261", "receiver": "PRelu_264", @@ -9064,555 +9345,489 @@ "sender_port": "_263", "receiver_port": "_263" }, - "PRelu_264_Concat_265": { + "PRelu_264_Conv_265": { "sender": "PRelu_264", - "receiver": "Concat_265", + "receiver": "Conv_265", "sender_port": "_264", "receiver_port": "_264" }, - "Concat_265_Conv_266": { - "sender": "Concat_265", - "receiver": "Conv_266", - "sender_port": "_265", - "receiver_port": "_265" - }, - "Concat_265_Conv_274": { - "sender": "Concat_265", - "receiver": "Conv_274", + "Conv_265_PRelu_268": { + "sender": "Conv_265", + "receiver": "PRelu_268", "sender_port": "_265", "receiver_port": "_265" }, - "Concat_265_Conv_306": { - "sender": "Concat_265", - "receiver": "Conv_306", - "sender_port": "_265", - "receiver_port": "_265" - }, - "Concat_265_Conv_314": { - "sender": "Concat_265", - "receiver": "Conv_314", - "sender_port": "_265", - 
"receiver_port": "_265" - }, - "Conv_266_PRelu_269": { - "sender": "Conv_266", - "receiver": "PRelu_269", - "sender_port": "_266", - "receiver_port": "_266" + "Unsqueeze_267_PRelu_268": { + "sender": "Unsqueeze_267", + "receiver": "PRelu_268", + "sender_port": "_267", + "receiver_port": "_267" }, - "Unsqueeze_268_PRelu_269": { - "sender": "Unsqueeze_268", - "receiver": "PRelu_269", + "PRelu_268_Concat_311": { + "sender": "PRelu_268", + "receiver": "Concat_311", "sender_port": "_268", "receiver_port": "_268" }, - "PRelu_269_Conv_270": { - "sender": "PRelu_269", - "receiver": "Conv_270", + "Conv_269_PRelu_272": { + "sender": "Conv_269", + "receiver": "PRelu_272", "sender_port": "_269", "receiver_port": "_269" }, - "Conv_270_PRelu_273": { - "sender": "Conv_270", - "receiver": "PRelu_273", - "sender_port": "_270", - "receiver_port": "_270" + "Unsqueeze_271_PRelu_272": { + "sender": "Unsqueeze_271", + "receiver": "PRelu_272", + "sender_port": "_271", + "receiver_port": "_271" }, - "Unsqueeze_272_PRelu_273": { - "sender": "Unsqueeze_272", - "receiver": "PRelu_273", + "PRelu_272_Pad_295": { + "sender": "PRelu_272", + "receiver": "Pad_295", "sender_port": "_272", "receiver_port": "_272" }, - "PRelu_273_Concat_318": { - "sender": "PRelu_273", - "receiver": "Concat_318", - "sender_port": "_273", - "receiver_port": "_273" - }, - "Conv_274_PRelu_277": { - "sender": "Conv_274", - "receiver": "PRelu_277", + "Shape_274_Gather_275": { + "sender": "Shape_274", + "receiver": "Gather_275", "sender_port": "_274", "receiver_port": "_274" }, - "Unsqueeze_276_PRelu_277": { - "sender": "Unsqueeze_276", - "receiver": "PRelu_277", - "sender_port": "_276", - "receiver_port": "_276" - }, - "PRelu_277_Pad_302": { - "sender": "PRelu_277", - "receiver": "Pad_302", - "sender_port": "_277", - "receiver_port": "_277" - }, - "ConcatFromSequence_279_Shape_281": { - "sender": "ConcatFromSequence_279", - "receiver": "Shape_281", + "Gather_275_Sub_279": { + "sender": "Gather_275", + "receiver": "Sub_279", + "sender_port": "_275", + "receiver_port": "_275" + }, + "Mul_278_Sub_279": { + "sender": "Mul_278", + "receiver": "Sub_279", + "sender_port": "_278", + "receiver_port": "_278" + }, + "Sub_279_ConstantOfShape_281": { + "sender": "Sub_279", + "receiver": "ConstantOfShape_281", "sender_port": "_279", "receiver_port": "_279" }, - "ConcatFromSequence_279_Cast_287": { - "sender": "ConcatFromSequence_279", - "receiver": "Cast_287", - "sender_port": "_279", - "receiver_port": "_279" + "Cast_280_Concat_282": { + "sender": "Cast_280", + "receiver": "Concat_282", + "sender_port": "_280", + "receiver_port": "_280" }, - "Shape_281_Gather_282": { - "sender": "Shape_281", - "receiver": "Gather_282", + "ConstantOfShape_281_Concat_282": { + "sender": "ConstantOfShape_281", + "receiver": "Concat_282", "sender_port": "_281", "receiver_port": "_281" }, - "Gather_282_Sub_286": { - "sender": "Gather_282", - "receiver": "Sub_286", + "Concat_282_Reshape_284": { + "sender": "Concat_282", + "receiver": "Reshape_284", "sender_port": "_282", "receiver_port": "_282" }, - "Mul_285_Sub_286": { - "sender": "Mul_285", - "receiver": "Sub_286", - "sender_port": "_285", - "receiver_port": "_285" - }, - "Sub_286_ConstantOfShape_288": { - "sender": "Sub_286", - "receiver": "ConstantOfShape_288", - "sender_port": "_286", - "receiver_port": "_286" - }, - "Cast_287_Concat_289": { - "sender": "Cast_287", - "receiver": "Concat_289", - "sender_port": "_287", - "receiver_port": "_287" - }, - "ConstantOfShape_288_Concat_289": { - "sender": "ConstantOfShape_288", - 
"receiver": "Concat_289", - "sender_port": "_288", - "receiver_port": "_288" - }, - "Concat_289_Reshape_291": { - "sender": "Concat_289", - "receiver": "Reshape_291", + "Reshape_284_Slice_289": { + "sender": "Reshape_284", + "receiver": "Slice_289", + "sender_port": "_284", + "receiver_port": "_284" + }, + "Slice_289_Transpose_290": { + "sender": "Slice_289", + "receiver": "Transpose_290", "sender_port": "_289", "receiver_port": "_289" }, - "Reshape_291_Slice_296": { - "sender": "Reshape_291", - "receiver": "Slice_296", - "sender_port": "_291", - "receiver_port": "_291" - }, - "Slice_296_Transpose_297": { - "sender": "Slice_296", - "receiver": "Transpose_297", - "sender_port": "_296", - "receiver_port": "_296" - }, - "Transpose_297_Reshape_299": { - "sender": "Transpose_297", - "receiver": "Reshape_299", + "Transpose_290_Reshape_292": { + "sender": "Transpose_290", + "receiver": "Reshape_292", + "sender_port": "_290", + "receiver_port": "_290" + }, + "Reshape_292_Cast_293": { + "sender": "Reshape_292", + "receiver": "Cast_293", + "sender_port": "_292", + "receiver_port": "_292" + }, + "Cast_293_Pad_295": { + "sender": "Cast_293", + "receiver": "Pad_295", + "sender_port": "_293", + "receiver_port": "_293" + }, + "Pad_295_Pad_297": { + "sender": "Pad_295", + "receiver": "Pad_297", + "sender_port": "_295", + "receiver_port": "_295" + }, + "Pad_297_AveragePool_298": { + "sender": "Pad_297", + "receiver": "AveragePool_298", "sender_port": "_297", "receiver_port": "_297" }, - "Reshape_299_Cast_300": { - "sender": "Reshape_299", - "receiver": "Cast_300", + "AveragePool_298_Concat_311": { + "sender": "AveragePool_298", + "receiver": "Concat_311", + "sender_port": "_298", + "receiver_port": "_298" + }, + "Conv_299_PRelu_302": { + "sender": "Conv_299", + "receiver": "PRelu_302", "sender_port": "_299", "receiver_port": "_299" }, - "Cast_300_Pad_302": { - "sender": "Cast_300", - "receiver": "Pad_302", - "sender_port": "_300", - "receiver_port": "_300" + "Unsqueeze_301_PRelu_302": { + "sender": "Unsqueeze_301", + "receiver": "PRelu_302", + "sender_port": "_301", + "receiver_port": "_301" }, - "Pad_302_Pad_304": { - "sender": "Pad_302", - "receiver": "Pad_304", + "PRelu_302_Conv_303": { + "sender": "PRelu_302", + "receiver": "Conv_303", "sender_port": "_302", "receiver_port": "_302" }, - "Pad_304_AveragePool_305": { - "sender": "Pad_304", - "receiver": "AveragePool_305", - "sender_port": "_304", - "receiver_port": "_304" + "Conv_303_PRelu_306": { + "sender": "Conv_303", + "receiver": "PRelu_306", + "sender_port": "_303", + "receiver_port": "_303" }, - "AveragePool_305_Concat_318": { - "sender": "AveragePool_305", - "receiver": "Concat_318", + "Unsqueeze_305_PRelu_306": { + "sender": "Unsqueeze_305", + "receiver": "PRelu_306", "sender_port": "_305", "receiver_port": "_305" }, - "Conv_306_PRelu_309": { - "sender": "Conv_306", - "receiver": "PRelu_309", + "PRelu_306_Concat_311": { + "sender": "PRelu_306", + "receiver": "Concat_311", "sender_port": "_306", "receiver_port": "_306" }, - "Unsqueeze_308_PRelu_309": { - "sender": "Unsqueeze_308", - "receiver": "PRelu_309", - "sender_port": "_308", - "receiver_port": "_308" + "Conv_307_PRelu_310": { + "sender": "Conv_307", + "receiver": "PRelu_310", + "sender_port": "_307", + "receiver_port": "_307" }, - "PRelu_309_Conv_310": { - "sender": "PRelu_309", - "receiver": "Conv_310", + "Unsqueeze_309_PRelu_310": { + "sender": "Unsqueeze_309", + "receiver": "PRelu_310", "sender_port": "_309", "receiver_port": "_309" }, - "Conv_310_PRelu_313": { - "sender": "Conv_310", - 
"receiver": "PRelu_313", + "PRelu_310_Concat_311": { + "sender": "PRelu_310", + "receiver": "Concat_311", "sender_port": "_310", "receiver_port": "_310" }, - "Unsqueeze_312_PRelu_313": { - "sender": "Unsqueeze_312", - "receiver": "PRelu_313", - "sender_port": "_312", - "receiver_port": "_312" + "Concat_311_Pad_313": { + "sender": "Concat_311", + "receiver": "Pad_313", + "sender_port": "_311", + "receiver_port": "_311" }, - "PRelu_313_Concat_318": { - "sender": "PRelu_313", - "receiver": "Concat_318", + "Pad_313_AveragePool_314": { + "sender": "Pad_313", + "receiver": "AveragePool_314", "sender_port": "_313", "receiver_port": "_313" }, - "Conv_314_PRelu_317": { - "sender": "Conv_314", - "receiver": "PRelu_317", + "AveragePool_314_Conv_315": { + "sender": "AveragePool_314", + "receiver": "Conv_315", + "sender_port": "_314", + "receiver_port": "_314" + }, + "AveragePool_314_Conv_345": { + "sender": "AveragePool_314", + "receiver": "Conv_345", + "sender_port": "_314", + "receiver_port": "_314" + }, + "AveragePool_314_Conv_353": { + "sender": "AveragePool_314", + "receiver": "Conv_353", "sender_port": "_314", "receiver_port": "_314" }, - "Unsqueeze_316_PRelu_317": { - "sender": "Unsqueeze_316", - "receiver": "PRelu_317", - "sender_port": "_316", - "receiver_port": "_316" + "Conv_315_PRelu_318": { + "sender": "Conv_315", + "receiver": "PRelu_318", + "sender_port": "_315", + "receiver_port": "_315" }, - "PRelu_317_Concat_318": { - "sender": "PRelu_317", - "receiver": "Concat_318", + "Unsqueeze_317_PRelu_318": { + "sender": "Unsqueeze_317", + "receiver": "PRelu_318", "sender_port": "_317", "receiver_port": "_317" }, - "Concat_318_Pad_320": { - "sender": "Concat_318", - "receiver": "Pad_320", + "PRelu_318_Pad_341": { + "sender": "PRelu_318", + "receiver": "Pad_341", "sender_port": "_318", "receiver_port": "_318" }, - "Pad_320_AveragePool_321": { - "sender": "Pad_320", - "receiver": "AveragePool_321", + "Shape_320_Gather_321": { + "sender": "Shape_320", + "receiver": "Gather_321", "sender_port": "_320", "receiver_port": "_320" }, - "AveragePool_321_Conv_322": { - "sender": "AveragePool_321", - "receiver": "Conv_322", - "sender_port": "_321", - "receiver_port": "_321" - }, - "AveragePool_321_Conv_354": { - "sender": "AveragePool_321", - "receiver": "Conv_354", + "Gather_321_Sub_325": { + "sender": "Gather_321", + "receiver": "Sub_325", "sender_port": "_321", "receiver_port": "_321" }, - "AveragePool_321_Conv_362": { - "sender": "AveragePool_321", - "receiver": "Conv_362", - "sender_port": "_321", - "receiver_port": "_321" - }, - "Conv_322_PRelu_325": { - "sender": "Conv_322", - "receiver": "PRelu_325", - "sender_port": "_322", - "receiver_port": "_322" - }, - "Unsqueeze_324_PRelu_325": { - "sender": "Unsqueeze_324", - "receiver": "PRelu_325", + "Mul_324_Sub_325": { + "sender": "Mul_324", + "receiver": "Sub_325", "sender_port": "_324", "receiver_port": "_324" }, - "PRelu_325_Pad_350": { - "sender": "PRelu_325", - "receiver": "Pad_350", + "Sub_325_ConstantOfShape_327": { + "sender": "Sub_325", + "receiver": "ConstantOfShape_327", "sender_port": "_325", "receiver_port": "_325" }, - "ConcatFromSequence_327_Shape_329": { - "sender": "ConcatFromSequence_327", - "receiver": "Shape_329", - "sender_port": "_327", - "receiver_port": "_327" + "Cast_326_Concat_328": { + "sender": "Cast_326", + "receiver": "Concat_328", + "sender_port": "_326", + "receiver_port": "_326" }, - "ConcatFromSequence_327_Cast_335": { - "sender": "ConcatFromSequence_327", - "receiver": "Cast_335", + "ConstantOfShape_327_Concat_328": { + 
"sender": "ConstantOfShape_327", + "receiver": "Concat_328", "sender_port": "_327", "receiver_port": "_327" }, - "Shape_329_Gather_330": { - "sender": "Shape_329", - "receiver": "Gather_330", - "sender_port": "_329", - "receiver_port": "_329" + "Concat_328_Reshape_330": { + "sender": "Concat_328", + "receiver": "Reshape_330", + "sender_port": "_328", + "receiver_port": "_328" }, - "Gather_330_Sub_334": { - "sender": "Gather_330", - "receiver": "Sub_334", + "Reshape_330_Slice_335": { + "sender": "Reshape_330", + "receiver": "Slice_335", "sender_port": "_330", "receiver_port": "_330" }, - "Mul_333_Sub_334": { - "sender": "Mul_333", - "receiver": "Sub_334", - "sender_port": "_333", - "receiver_port": "_333" - }, - "Sub_334_ConstantOfShape_336": { - "sender": "Sub_334", - "receiver": "ConstantOfShape_336", - "sender_port": "_334", - "receiver_port": "_334" - }, - "Cast_335_Concat_337": { - "sender": "Cast_335", - "receiver": "Concat_337", + "Slice_335_Transpose_336": { + "sender": "Slice_335", + "receiver": "Transpose_336", "sender_port": "_335", "receiver_port": "_335" }, - "ConstantOfShape_336_Concat_337": { - "sender": "ConstantOfShape_336", - "receiver": "Concat_337", + "Transpose_336_Reshape_338": { + "sender": "Transpose_336", + "receiver": "Reshape_338", "sender_port": "_336", "receiver_port": "_336" }, - "Concat_337_Reshape_339": { - "sender": "Concat_337", - "receiver": "Reshape_339", - "sender_port": "_337", - "receiver_port": "_337" + "Reshape_338_Cast_339": { + "sender": "Reshape_338", + "receiver": "Cast_339", + "sender_port": "_338", + "receiver_port": "_338" }, - "Reshape_339_Slice_344": { - "sender": "Reshape_339", - "receiver": "Slice_344", + "Cast_339_Pad_341": { + "sender": "Cast_339", + "receiver": "Pad_341", "sender_port": "_339", "receiver_port": "_339" }, - "Slice_344_Transpose_345": { - "sender": "Slice_344", - "receiver": "Transpose_345", + "Pad_341_Pad_343": { + "sender": "Pad_341", + "receiver": "Pad_343", + "sender_port": "_341", + "receiver_port": "_341" + }, + "Pad_343_AveragePool_344": { + "sender": "Pad_343", + "receiver": "AveragePool_344", + "sender_port": "_343", + "receiver_port": "_343" + }, + "AveragePool_344_Concat_357": { + "sender": "AveragePool_344", + "receiver": "Concat_357", "sender_port": "_344", "receiver_port": "_344" }, - "Transpose_345_Reshape_347": { - "sender": "Transpose_345", - "receiver": "Reshape_347", + "Conv_345_PRelu_348": { + "sender": "Conv_345", + "receiver": "PRelu_348", "sender_port": "_345", "receiver_port": "_345" }, - "Reshape_347_Cast_348": { - "sender": "Reshape_347", - "receiver": "Cast_348", + "Unsqueeze_347_PRelu_348": { + "sender": "Unsqueeze_347", + "receiver": "PRelu_348", "sender_port": "_347", "receiver_port": "_347" }, - "Cast_348_Pad_350": { - "sender": "Cast_348", - "receiver": "Pad_350", + "PRelu_348_Conv_349": { + "sender": "PRelu_348", + "receiver": "Conv_349", "sender_port": "_348", "receiver_port": "_348" }, - "Pad_350_Pad_352": { - "sender": "Pad_350", - "receiver": "Pad_352", - "sender_port": "_350", - "receiver_port": "_350" - }, - "Pad_352_AveragePool_353": { - "sender": "Pad_352", - "receiver": "AveragePool_353", + "Conv_349_PRelu_352": { + "sender": "Conv_349", + "receiver": "PRelu_352", + "sender_port": "_349", + "receiver_port": "_349" + }, + "Unsqueeze_351_PRelu_352": { + "sender": "Unsqueeze_351", + "receiver": "PRelu_352", + "sender_port": "_351", + "receiver_port": "_351" + }, + "PRelu_352_Concat_357": { + "sender": "PRelu_352", + "receiver": "Concat_357", "sender_port": "_352", "receiver_port": 
"_352" }, - "AveragePool_353_Concat_366": { - "sender": "AveragePool_353", - "receiver": "Concat_366", + "Conv_353_PRelu_356": { + "sender": "Conv_353", + "receiver": "PRelu_356", "sender_port": "_353", "receiver_port": "_353" }, - "Conv_354_PRelu_357": { - "sender": "Conv_354", - "receiver": "PRelu_357", - "sender_port": "_354", - "receiver_port": "_354" + "Unsqueeze_355_PRelu_356": { + "sender": "Unsqueeze_355", + "receiver": "PRelu_356", + "sender_port": "_355", + "receiver_port": "_355" }, - "Unsqueeze_356_PRelu_357": { - "sender": "Unsqueeze_356", - "receiver": "PRelu_357", + "PRelu_356_Concat_357": { + "sender": "PRelu_356", + "receiver": "Concat_357", "sender_port": "_356", "receiver_port": "_356" }, - "PRelu_357_Conv_358": { - "sender": "PRelu_357", - "receiver": "Conv_358", + "Concat_357_Shape_358": { + "sender": "Concat_357", + "receiver": "Shape_358", "sender_port": "_357", "receiver_port": "_357" }, - "Conv_358_PRelu_361": { - "sender": "Conv_358", - "receiver": "PRelu_361", + "Concat_357_Reshape_365": { + "sender": "Concat_357", + "receiver": "Reshape_365", + "sender_port": "_357", + "receiver_port": "_357" + }, + "Shape_358_Slice_362": { + "sender": "Shape_358", + "receiver": "Slice_362", "sender_port": "_358", "receiver_port": "_358" }, - "Unsqueeze_360_PRelu_361": { - "sender": "Unsqueeze_360", - "receiver": "PRelu_361", - "sender_port": "_360", - "receiver_port": "_360" - }, - "PRelu_361_Concat_366": { - "sender": "PRelu_361", - "receiver": "Concat_366", - "sender_port": "_361", - "receiver_port": "_361" - }, - "Conv_362_PRelu_365": { - "sender": "Conv_362", - "receiver": "PRelu_365", + "Slice_362_Concat_364": { + "sender": "Slice_362", + "receiver": "Concat_364", "sender_port": "_362", "receiver_port": "_362" }, - "Unsqueeze_364_PRelu_365": { - "sender": "Unsqueeze_364", - "receiver": "PRelu_365", + "Concat_364_Reshape_365": { + "sender": "Concat_364", + "receiver": "Reshape_365", "sender_port": "_364", "receiver_port": "_364" }, - "PRelu_365_Concat_366": { - "sender": "PRelu_365", + "Reshape_365_Concat_366": { + "sender": "Reshape_365", "receiver": "Concat_366", "sender_port": "_365", "receiver_port": "_365" }, - "Concat_366_Shape_367": { - "sender": "Concat_366", - "receiver": "Shape_367", - "sender_port": "_366", - "receiver_port": "_366" - }, - "Concat_366_Reshape_374": { + "Concat_366_MatMul_368": { "sender": "Concat_366", - "receiver": "Reshape_374", + "receiver": "MatMul_368", "sender_port": "_366", "receiver_port": "_366" }, - "Shape_367_Slice_371": { - "sender": "Shape_367", - "receiver": "Slice_371", + "Transpose_367_MatMul_368": { + "sender": "Transpose_367", + "receiver": "MatMul_368", "sender_port": "_367", "receiver_port": "_367" }, - "Slice_371_Concat_373": { - "sender": "Slice_371", - "receiver": "Concat_373", + "MatMul_368_Add_369": { + "sender": "MatMul_368", + "receiver": "Add_369", + "sender_port": "_368", + "receiver_port": "_368" + }, + "Add_369_PRelu_370": { + "sender": "Add_369", + "receiver": "PRelu_370", + "sender_port": "_369", + "receiver_port": "_369" + }, + "PRelu_370_MatMul_372": { + "sender": "PRelu_370", + "receiver": "MatMul_372", + "sender_port": "_370", + "receiver_port": "_370" + }, + "Transpose_371_MatMul_372": { + "sender": "Transpose_371", + "receiver": "MatMul_372", "sender_port": "_371", "receiver_port": "_371" }, - "Concat_373_Reshape_374": { - "sender": "Concat_373", - "receiver": "Reshape_374", + "MatMul_372_Add_373": { + "sender": "MatMul_372", + "receiver": "Add_373", + "sender_port": "_372", + "receiver_port": "_372" + }, + 
"Add_373_PRelu_374": { + "sender": "Add_373", + "receiver": "PRelu_374", "sender_port": "_373", "receiver_port": "_373" }, - "Reshape_374_Concat_375": { - "sender": "Reshape_374", - "receiver": "Concat_375", + "PRelu_374_MatMul_376": { + "sender": "PRelu_374", + "receiver": "MatMul_376", "sender_port": "_374", "receiver_port": "_374" }, - "Concat_375_MatMul_377": { - "sender": "Concat_375", - "receiver": "MatMul_377", + "Transpose_375_MatMul_376": { + "sender": "Transpose_375", + "receiver": "MatMul_376", "sender_port": "_375", "receiver_port": "_375" }, - "Transpose_376_MatMul_377": { - "sender": "Transpose_376", - "receiver": "MatMul_377", + "MatMul_376_Add_377": { + "sender": "MatMul_376", + "receiver": "Add_377", "sender_port": "_376", "receiver_port": "_376" - }, - "MatMul_377_Add_378": { - "sender": "MatMul_377", - "receiver": "Add_378", - "sender_port": "_377", - "receiver_port": "_377" - }, - "Add_378_PRelu_379": { - "sender": "Add_378", - "receiver": "PRelu_379", - "sender_port": "_378", - "receiver_port": "_378" - }, - "PRelu_379_MatMul_381": { - "sender": "PRelu_379", - "receiver": "MatMul_381", - "sender_port": "_379", - "receiver_port": "_379" - }, - "Transpose_380_MatMul_381": { - "sender": "Transpose_380", - "receiver": "MatMul_381", - "sender_port": "_380", - "receiver_port": "_380" - }, - "MatMul_381_Add_382": { - "sender": "MatMul_381", - "receiver": "Add_382", - "sender_port": "_381", - "receiver_port": "_381" - }, - "Add_382_PRelu_383": { - "sender": "Add_382", - "receiver": "PRelu_383", - "sender_port": "_382", - "receiver_port": "_382" - }, - "PRelu_383_MatMul_385": { - "sender": "PRelu_383", - "receiver": "MatMul_385", - "sender_port": "_383", - "receiver_port": "_383" - }, - "Transpose_384_MatMul_385": { - "sender": "Transpose_384", - "receiver": "MatMul_385", - "sender_port": "_384", - "receiver_port": "_384" - }, - "MatMul_385_Add_386": { - "sender": "MatMul_385", - "receiver": "Add_386", - "sender_port": "_385", - "receiver_port": "_385" } } } }, - "onnx_opset_version": 9 + "onnx_opset_version": 14 } } diff --git a/examples/PyTorch/inception.png b/examples/PyTorch/inception.png index ed468c27f..a58290a49 100644 Binary files a/examples/PyTorch/inception.png and b/examples/PyTorch/inception.png differ diff --git a/examples/PyTorch/mlp_pure_mdf.json b/examples/PyTorch/mlp_pure_mdf.json index 181f5cb2d..fc1e251cb 100644 --- a/examples/PyTorch/mlp_pure_mdf.json +++ b/examples/PyTorch/mlp_pure_mdf.json @@ -1,7 +1,7 @@ { "mlp_pure_mdf": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "mlp_pure_mdf": { "nodes": { diff --git a/examples/PyTorch/mlp_pure_mdf.yaml b/examples/PyTorch/mlp_pure_mdf.yaml index 18c572d07..d412b2abf 100644 --- a/examples/PyTorch/mlp_pure_mdf.yaml +++ b/examples/PyTorch/mlp_pure_mdf.yaml @@ -1,6 +1,6 @@ mlp_pure_mdf: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: mlp_pure_mdf: nodes: diff --git a/examples/PyTorch/simple_pytorch_to_mdf.json b/examples/PyTorch/simple_pytorch_to_mdf.json index 8afbc08cc..26311e61f 100644 --- a/examples/PyTorch/simple_pytorch_to_mdf.json +++ b/examples/PyTorch/simple_pytorch_to_mdf.json @@ -1,7 +1,7 @@ { "SimpleNet": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "SimpleNetGraph": { "nodes": { @@ -416,6 +416,6 @@ } } }, - 
"onnx_opset_version": 9 + "onnx_opset_version": 14 } } diff --git a/examples/TensorFlow/Keras/IRIS/keras_model.py b/examples/TensorFlow/Keras/IRIS/keras_model.py index 56916d694..aa455382b 100644 --- a/examples/TensorFlow/Keras/IRIS/keras_model.py +++ b/examples/TensorFlow/Keras/IRIS/keras_model.py @@ -4,7 +4,7 @@ # tf.__version__ from keras.layers import Dense -from keras.utils.vis_utils import plot_model +from keras.utils import plot_model from keras.models import Sequential # from keras_visualizer import visualizer diff --git a/examples/TensorFlow/Keras/IRIS/keras_to_MDF.json b/examples/TensorFlow/Keras/IRIS/keras_to_MDF.json index e007a880d..cefe532af 100644 --- a/examples/TensorFlow/Keras/IRIS/keras_to_MDF.json +++ b/examples/TensorFlow/Keras/IRIS/keras_to_MDF.json @@ -1,7 +1,7 @@ { "Sequential": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "Sequential_graph": { "nodes": { diff --git a/examples/TensorFlow/Keras/IRIS/keras_to_MDF.yaml b/examples/TensorFlow/Keras/IRIS/keras_to_MDF.yaml index 13ef1c48d..ceb3053cb 100644 --- a/examples/TensorFlow/Keras/IRIS/keras_to_MDF.yaml +++ b/examples/TensorFlow/Keras/IRIS/keras_to_MDF.yaml @@ -1,6 +1,6 @@ Sequential: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: Sequential_graph: nodes: diff --git a/examples/TensorFlow/Keras/MNIST/keras_model.py b/examples/TensorFlow/Keras/MNIST/keras_model.py index e1eb79448..78bd8530c 100644 --- a/examples/TensorFlow/Keras/MNIST/keras_model.py +++ b/examples/TensorFlow/Keras/MNIST/keras_model.py @@ -4,7 +4,7 @@ # tf.__version__ from keras.layers import Dense -from keras.utils.vis_utils import plot_model +from keras.utils import plot_model from keras.models import Sequential # from keras_visualizer import visualizer diff --git a/examples/TensorFlow/Keras/MNIST/keras_to_MDF.json b/examples/TensorFlow/Keras/MNIST/keras_to_MDF.json index 61cca58b7..c5eaba1d0 100644 --- a/examples/TensorFlow/Keras/MNIST/keras_to_MDF.json +++ b/examples/TensorFlow/Keras/MNIST/keras_to_MDF.json @@ -1,7 +1,7 @@ { "Sequential": { "format": "ModECI MDF v0.4", - "generating_application": "Python modeci-mdf v0.4.8", + "generating_application": "Python modeci-mdf v0.4.9", "graphs": { "Sequential_graph": { "nodes": { diff --git a/examples/TensorFlow/Keras/MNIST/keras_to_MDF.yaml b/examples/TensorFlow/Keras/MNIST/keras_to_MDF.yaml index c1788fb88..362df6c7f 100644 --- a/examples/TensorFlow/Keras/MNIST/keras_to_MDF.yaml +++ b/examples/TensorFlow/Keras/MNIST/keras_to_MDF.yaml @@ -1,6 +1,6 @@ Sequential: format: ModECI MDF v0.4 - generating_application: Python modeci-mdf v0.4.8 + generating_application: Python modeci-mdf v0.4.9 graphs: Sequential_graph: nodes: diff --git a/setup.cfg b/setup.cfg index 1af6302ab..465a58432 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,16 +16,16 @@ classifiers = License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3) Natural Language :: English Operating System :: OS Independent - Programming Language :: Python :: 3.6 + Programming Language :: Python :: 3.8 Topic :: Scientific/Engineering Intended Audience :: Science/Research Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 + Programming 
     Topic :: Scientific/Engineering
     Topic :: Software Development
     Typing :: Typed
@@ -40,9 +40,9 @@ install_requires =
     matplotlib
     graphviz
     h5py
-    onnxruntime==1.13.1
-    onnx==1.12.0
-    skl2onnx==1.14.0
+    onnxruntime
+    onnx
+    skl2onnx
     attrs>=21.1.0
     cattrs
     modelspec<0.4,>=0.3.0
@@ -65,7 +65,7 @@ neuroml =

 tensorflow =
-    tensorflow
+    tensorflow>=2.11.0
     keras_visualizer
     pydot

@@ -89,14 +89,14 @@ optional =
     Jinja2<3.1
     torchviz
     netron
-    torch>=1.11.0
-    torchvision<=0.12.0
+    torch<2.2.0,>=1.11.0
+    torchvision
     h5py

 all_except_psyneulink =
+    modeci-mdf[tensorflow]
     modeci-mdf[optional]
     modeci-mdf[neuroml]
-    modeci-mdf[tensorflow]

 all =
     modeci-mdf[all_except_psyneulink]
diff --git a/src/modeci_mdf/__init__.py b/src/modeci_mdf/__init__.py
index 439992030..ffdd5b083 100644
--- a/src/modeci_mdf/__init__.py
+++ b/src/modeci_mdf/__init__.py
@@ -13,4 +13,4 @@
 MODECI_MDF_VERSION = "0.4"

 # Version of the Python module.
-__version__ = "0.4.8"
+__version__ = "0.4.9"
diff --git a/src/modeci_mdf/functions/onnx.py b/src/modeci_mdf/functions/onnx.py
index 32a6e4c2a..72a833a28 100644
--- a/src/modeci_mdf/functions/onnx.py
+++ b/src/modeci_mdf/functions/onnx.py
@@ -125,9 +125,12 @@ def run_onnx_op(
     if op_name == "Pad":
         if "constant_value" in inputs:
             cval = inputs["constant_value"]
-            data = list(inputs.values())[0]
-            if cval.dtype != data.dtype:
-                inputs["constant_value"] = cval.astype(data.dtype)
+            if cval is None:
+                inputs.pop("constant_value")
+            else:
+                data = list(inputs.values())[0]
+                if cval.dtype != data.dtype:
+                    inputs["constant_value"] = cval.astype(data.dtype)

     # SkLearn ONNX doesn't seem to support ConcatFromSequence, see
     # https://github.com/onnx/sklearn-onnx/issues/710
diff --git a/src/modeci_mdf/interfaces/onnx/exporter.py b/src/modeci_mdf/interfaces/onnx/exporter.py
index cdbb20797..8ff9893da 100644
--- a/src/modeci_mdf/interfaces/onnx/exporter.py
+++ b/src/modeci_mdf/interfaces/onnx/exporter.py
@@ -12,6 +12,8 @@
 from onnx import AttributeProto, TensorProto, GraphProto
 from onnx.defs import get_schema

+import onnxruntime
+
 from ast import literal_eval

 import argparse
@@ -44,7 +46,14 @@ def mdf_to_onnx(mdf_model):
         onnx_graph = generate_onnx_graph(graph, nodenames_in_execution_order)

         # Make an onnx model from graph
-        onnx_model = helper.make_model(onnx_graph)
+
+        # Check whether the onnxruntime version is less than 1.15; if so, ir_version
+        # should be 8 for now. See: https://github.com/microsoft/onnxruntime/issues/15874
+        make_model_kwargs = {}
+        if onnxruntime.__version__ < "1.15":
+            make_model_kwargs = {"ir_version": 8}
+
+        onnx_model = helper.make_model(onnx_graph, **make_model_kwargs)

         # Infer shapes
         onnx_model = shape_inference.infer_shapes(onnx_model)
diff --git a/src/modeci_mdf/interfaces/onnx/importer.py b/src/modeci_mdf/interfaces/onnx/importer.py
index c9dcd567d..d47c54d3c 100644
--- a/src/modeci_mdf/interfaces/onnx/importer.py
+++ b/src/modeci_mdf/interfaces/onnx/importer.py
@@ -21,8 +21,12 @@
 def id_to_port(id: str):
     """Turn unique ONNX output and input value names into valid MDF input and outport names"""
+    # Get rid of periods in names
     new_name = str(id).replace(".", "_")

+    # Get rid of slashes in names
+    new_name = new_name.replace("/", "_")
+    # Get rid of double colons in id names; these cause issues with the execution engine.
 new_name = new_name.replace("::", "_")
diff --git a/src/modeci_mdf/interfaces/pytorch/importer.py b/src/modeci_mdf/interfaces/pytorch/importer.py
index efdfd63c5..31e4f7775 100644
--- a/src/modeci_mdf/interfaces/pytorch/importer.py
+++ b/src/modeci_mdf/interfaces/pytorch/importer.py
@@ -13,6 +13,18 @@

 import torch

+# We need to monkey patch the torch._C.Node class to add a __getitem__ method
+# This is for torch 2.0
+# From https://github.com/openai/CLIP/issues/79#issuecomment-1624202950
+def _node_get(node: torch._C.Node, key: str):
+    """Gets attributes of a node which is polymorphic over return type."""
+    sel = node.kindOf(key)
+    return getattr(node, sel)(key)
+
+
+torch._C.Node.__getitem__ = _node_get
+
+
 from modeci_mdf.mdf import Model, Graph, Node, Edge, InputPort, OutputPort, Parameter

 from modeci_mdf.functions.onnx import onnx_opset_version as modeci_onnx_opset_version
@@ -293,15 +305,15 @@ def torchnode_to_mdfnode(
     """
     op = node.kind()

-    # Lookup the schema. For some reason we cannot just call node.schema(), it returns "(no schema)", huh?
-    # We need to do this the hard way.
-    schema = onnx.defs.get_schema(op.replace("onnx::", ""), modeci_onnx_opset_version)
-
     # Exclude constants (as nodes) from the MDF graph. We will instead insert them as parameters to the nodes that
     # they project to.
     if op in ("prim::Constant", "onnx::Constant"):
         return None

+    # Lookup the schema. For some reason we cannot just call node.schema(), it returns "(no schema)", huh?
+    # We need to do this the hard way.
+    schema = onnx.defs.get_schema(op.replace("onnx::", ""), modeci_onnx_opset_version)
+
     # If we are dealing with a loop node, we need to recursively create a sub-graph for the loop body
     if op == "onnx::Loop":
         sub_mdf_graph = Graph(id=f"LoopSubgraph{make_node_id(node)}")
@@ -504,7 +516,7 @@ def pytorch_to_mdf(
     graph = None

     if use_onnx_ops:
-        operator_export_type = torch._C._onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
+        operator_export_type = torch._C._onnx.OperatorExportTypes.ONNX
     else:
         operator_export_type = torch._C._onnx.OperatorExportTypes.RAW
diff --git a/test_all.sh b/test_all.sh
index 0310de6fa..dc0a74ca3 100755
--- a/test_all.sh
+++ b/test_all.sh
@@ -90,7 +90,7 @@ python addition.py

 ## Test Keras examples

-cd ../Tensorflow/Keras
+cd ../TensorFlow/Keras
 ./regenerate.sh

 ## Generate the docs
diff --git a/tests/interfaces/onnx/test_importer.py b/tests/interfaces/onnx/test_importer.py
index 107e58701..f4fdd037f 100644
--- a/tests/interfaces/onnx/test_importer.py
+++ b/tests/interfaces/onnx/test_importer.py
@@ -26,7 +26,7 @@ def test_ab():
     mdf_executable = EvaluableGraph(mdf_model.graphs[0], verbose=False)
     mdf_executable.evaluate(initializer={"input": test_input})

-    mdf_output = mdf_executable.enodes["Mul_3"].evaluable_outputs["_4"].curr_value
+    mdf_output = mdf_executable.enodes["/B/Mul"].evaluable_outputs["_4"].curr_value

     # Get the translated ONNX model
     onnx_models = mdf_to_onnx(mdf_model)
diff --git a/tests/interfaces/pytorch/test_import.py b/tests/interfaces/pytorch/test_import.py
index 493881dc5..b3f899cfe 100644
--- a/tests/interfaces/pytorch/test_import.py
+++ b/tests/interfaces/pytorch/test_import.py
@@ -26,13 +26,70 @@ def _get_torchvision_models():
     Get all the backbone models in torch vision, suprised there is no function to do this in torchvision.
     """

+    try:
+
+        import torchvision.models
+        from torchvision.models import get_model_builder, list_models
+
+    except ModuleNotFoundError:
+        pytest.mark.skip(
+            "Skipping PyTorch interface tests because pytorch is not installed."
+        )
+        return []
+
+    def list_model_fns(module):
+        return [(name, get_model_builder(name)) for name in list_models(module)]
+
+    # Copied from https://github.com/pytorch/vision/blob/main/test/test_models.py
+    skipped_big_models = {
+        "vit_h_14": {("Windows", "cpu"), ("Windows", "cuda")},
+        "regnet_y_128gf": {("Windows", "cpu"), ("Windows", "cuda")},
+        "mvit_v1_b": {("Windows", "cuda"), ("Linux", "cuda")},
+        "mvit_v2_s": {("Windows", "cuda"), ("Linux", "cuda")},
+        "swin_t": {},
+        "swin_s": {},
+        "swin_b": {},
+        "swin_v2_t": {},
+        "swin_v2_s": {},
+        "swin_v2_b": {},
+    }
+
+    # Copied from https://github.com/pytorch/vision/blob/main/test/test_models.py
+    # speeding up slow models:
+    slow_models = [
+        "convnext_base",
+        "convnext_large",
+        "resnext101_32x8d",
+        "resnext101_64x4d",
+        "wide_resnet101_2",
+        "efficientnet_b6",
+        "efficientnet_b7",
+        "efficientnet_v2_m",
+        "efficientnet_v2_l",
+        "regnet_y_16gf",
+        "regnet_y_32gf",
+        "regnet_y_128gf",
+        "regnet_x_16gf",
+        "regnet_x_32gf",
+        "swin_t",
+        "swin_s",
+        "swin_b",
+        "swin_v2_t",
+        "swin_v2_s",
+        "swin_v2_b",
+    ]
+
     if models is None:
         return []

     models_to_test = []
     model_classes = set()
-    for model_name, model in models.__dict__.items():
+    for model_name, model in list_model_fns(torchvision.models):
         try:
+
+            if model_name in skipped_big_models:
+                continue
+
             params = inspect.signature(model).parameters

             # Get the model class that this construction function returns. To cut down on tests,
@@ -40,10 +97,8 @@ def _get_torchvision_models():
             return_type = inspect.signature(model).return_annotation

             if (
-                "weights" in params
-                or "pretrained" in params
-                and return_type not in model_classes
-            ):
+                "weights" in params or "pretrained" in params
+            ) and return_type not in model_classes:
                 models_to_test.append(model)
                 if return_type:
                     model_classes.add(return_type)
@@ -59,15 +114,30 @@ def _get_torchvision_models():
         {"weights": None} if is_new_weights_api else {"pretrained": False}
     )

+    xfails = {
+        "inception_v3": "Inception-V3 is failing to match currently.",
+        "maxvit_t": "MaxViT is failing because we are trying to call ast.parse on a string that is not valid Python."
+        " Need to handle string arguments required by einops.",
+        "resnet101": "Resnet101 is failing to match currently.",
+        "vit_": "ViT models are failing because PyTorch can't convert the unflatten op to ONNX.",
+    }
+
     pytest_params = []
     for model in models_to_test:
-        t = (model, model(**model_weights_spec))
-        if model.__name__ == "inception_v3":
+
+        if model.__name__ not in slow_models:
+            t = (model, model_weights_spec, torch.rand((1, 3, 224, 224)))
+        else:
+            t = (model, model_weights_spec, torch.rand((1, 3, 64, 64)))
+
+        xf_models = [n for n in xfails.keys() if n in model.__name__]
+        if len(xf_models) > 0:
+            xf_reason = xfails[xf_models[0]]
             pytest_params.append(
                 pytest.param(
                     *t,
                     marks=pytest.mark.xfail(
-                        reason="Inception-V3 is failing to match currently."
+                        reason=xf_reason,
                     ),
                 )
             )
+        else:
+            pytest_params.append(t)
+
     return pytest_params
@@ -121,10 +191,10 @@ def _run_and_check_model(model, input=None):

     mdf_model2 = Model.from_json(mdf_model.to_json())


-@pytest.mark.parametrize("model_init, model", _get_torchvision_models())
-def test_torchvision_models(model_init, model):
+@pytest.mark.parametrize("model_init, kwargs, input", _get_torchvision_models())
+def test_torchvision_models(model_init, kwargs, input):
     """Test importing the PyTorch model into MDF, executing in execution engine"""
-    _run_and_check_model(model)
+    _run_and_check_model(model_init(**kwargs), input=input)


 def test_simple_convolution(simple_convolution_pytorch):
diff --git a/tests/test_onnx_functions.py b/tests/test_onnx_functions.py
index 4e6fdbaa8..5fff86b27 100644
--- a/tests/test_onnx_functions.py
+++ b/tests/test_onnx_functions.py
@@ -106,3 +106,8 @@ def test_maxpool():
         np.ones((1, 3, 32, 32)).astype(np.float32), kernel_shape=[2, 2]
     )
     assert True
+
+
+def test_randomuniform():
+    """Test ONNX randomuniform function."""
+    out = onnx_ops.randomuniform(low=-1.0, high=1.0, seed=0.0, shape=(1, 1))
diff --git a/tests/test_scheduler.py b/tests/test_scheduler.py
index 4bd7bebaf..45b5356ad 100644
--- a/tests/test_scheduler.py
+++ b/tests/test_scheduler.py
@@ -69,7 +69,7 @@ def test_execution_engine_onnx(tmpdir):
         eg = modeci_mdf.execution_engine.main(
             "examples/ONNX/ab.%s" % mdf_format, array_format=array_format
         )
-        output = eg.enodes["Mul_3"].evaluable_outputs["_4"].curr_value
+        output = eg.enodes["/B/Mul"].evaluable_outputs["_4"].curr_value

         assert np.array_equal(output, np.full((2, 3), 5))
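
A note on the `ir_version` gate added in `src/modeci_mdf/interfaces/onnx/exporter.py` above: `onnxruntime.__version__ < "1.15"` compares the version strings lexicographically, which would misclassify a version such as "1.9.0" (lexicographically greater than "1.15", so the workaround would be skipped on a release that needs it). Below is a minimal sketch of the same gate using parsed versions, assuming the third-party `packaging` library is installed; the one-node Identity graph is only a stand-in for the output of `generate_onnx_graph`, not part of the patch.

import onnxruntime
from onnx import helper, TensorProto
from packaging.version import Version

# Trivial stand-in graph: y = Identity(x).
x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [1])
y = helper.make_tensor_value_info("y", TensorProto.FLOAT, [1])
graph = helper.make_graph([helper.make_node("Identity", ["x"], ["y"])], "demo", [x], [y])

# Version("1.9.0") < Version("1.15") is True, as intended, whereas the raw
# string comparison "1.9.0" < "1.15" is False and would skip the workaround.
make_model_kwargs = {}
if Version(onnxruntime.__version__) < Version("1.15"):
    # onnxruntime releases before 1.15 reject models with newer IR versions;
    # see the microsoft/onnxruntime issue linked in the patch above.
    make_model_kwargs = {"ir_version": 8}

onnx_model = helper.make_model(graph, **make_model_kwargs)

`helper.make_model` applies extra keyword arguments as fields on the resulting `ModelProto`, so `ir_version=8` pins that field directly and leaves the rest of the exported model unchanged.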