diff --git a/Tutorials/pytorch-subgraphs/README.md b/Tutorials/pytorch-subgraphs/README.md
index 2ec26444..6757093f 100644
--- a/Tutorials/pytorch-subgraphs/README.md
+++ b/Tutorials/pytorch-subgraphs/README.md
@@ -27,15 +27,11 @@ Author: Daniele Bagni, Xilinx Inc
 
-### Current status
-1. Tested with PyTorch 1.7.1 within [Vitis AI 2.0](https://github.com/Xilinx/Vitis-AI/tree/v2.0) on an Ubuntu 18.04.5 Desktop PC and tested in hardware on VCK190 Production board (``xilinx-vck190-dpu-v2021.2-v2.2.0.img.gz``) and ZCU102 board (``xilinx-zcu102-dpu-v2021.2-v2.0.0.img.gz``) both from the Vitis AI 2.0.
+- Version: Vitis AI 2.5
+- Support: ZCU102
+- Last update: 21 Mar. 2023
-1. Tested with PyTorch 1.7.1 within [Vitis AI 2.5](https://github.com/Xilinx/Vitis-AI) on an Ubuntu 18.04.5 Desktop PC and tested in hardware on VCK190 Production board (``xilinx-vck190-dpu-v2022.1-v2.5.0.img.gz``) and ZCU102 board (``xilinx-zcu102-dpu-v2022.1-v2.5.0.img.gz``) both from the Vitis AI 2.5.
-
-
-
-#### Date: 20 June 2022
diff --git a/Tutorials/pytorch-subgraphs/files/application/main_subgraphs.py b/Tutorials/pytorch-subgraphs/files/application/main_subgraphs.py
index 6954e165..b626c02c 100644
--- a/Tutorials/pytorch-subgraphs/files/application/main_subgraphs.py
+++ b/Tutorials/pytorch-subgraphs/files/application/main_subgraphs.py
@@ -18,8 +18,7 @@ """
 
 """
-Author: Daniele Bagni & Jon Cory
-date: 20 June 2022
+date: 20 Mar 2023
 """
 
 from ctypes import *
@@ -116,33 +115,6 @@ def Linear(xx):
     np.save('cnn_out.bin', y)
     return y
 
-"""
-# yet to be checked
-def LeakyReLU(K, x):
-    data = np.asarray( x, dtype="float32" )
-    print("LKRE inp shape ", data.shape)
-    size = data[0].size
-    shape = data[0].shape
-    d = data[0].reshape([1, size])
-    print("LKRE inp: ", d)
-    pos_index = (d >= 0);
-    neg_index = (d < 0);
-    coef = np.float32(K)
-    y_n = d[np.ix_(neg_index)]*coef
-    y_p = d[np.ix_(pos_index)]
-    y = [y_n, y_p]
-    print("LKRE out: ", y)
-    x[0] = y.reshape([shape])
-    return x
-"""
-
-def fix2float(fix_point, value):
-    return value.astype(np.float32) * np.exp2(fix_point, dtype=np.float32)
-
-
-def float2fix(fix_point, value):
-    return value.astype(np.float32) / np.exp2(fix_point, dtype=np.float32)
-
 def execute_async(dpu, tensor_buffers_dict):
     input_tensor_buffers = [