diff --git a/.cirrus.yml b/.cirrus.yml index 112afe352c..a7f64255d1 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -125,9 +125,9 @@ task: - make USE_OPENMP=1 FreeBSD_task: - name: FreeBSD-gcc12 + name: FreeBSD-gcc freebsd_instance: - image_family: freebsd-13-3 + image_family: freebsd-14-1 install_script: - pkg update -f && pkg upgrade -y && pkg install -y gmake gcc compile_script: @@ -136,9 +136,9 @@ FreeBSD_task: FreeBSD_task: - name: freebsd-gcc12-ilp64 + name: freebsd-gcc-ilp64 freebsd_instance: - image_family: freebsd-13-3 + image_family: freebsd-14-1 install_script: - pkg update -f && pkg upgrade -y && pkg install -y gmake gcc compile_script: @@ -148,7 +148,7 @@ FreeBSD_task: FreeBSD_task: name: FreeBSD-clang-openmp freebsd_instance: - image_family: freebsd-13-3 + image_family: freebsd-14-1 install_script: - pkg update -f && pkg upgrade -y && pkg install -y gmake gcc - ln -s /usr/local/lib/gcc13/libgfortran.so.5.0.0 /usr/lib/libgfortran.so diff --git a/.github/workflows/c910v.yml b/.github/workflows/c910v.yml index a47ca1dce6..1dd3a2c713 100644 --- a/.github/workflows/c910v.yml +++ b/.github/workflows/c910v.yml @@ -37,7 +37,7 @@ jobs: run: | sudo apt-get update sudo apt-get install autoconf automake autotools-dev ninja-build make ccache \ - gcc-${{ matrix.apt_triple }} gfortran-${{ matrix.apt_triple }} libgomp1-riscv64-cross + gcc-${{ matrix.apt_triple }} gfortran-${{ matrix.apt_triple }} libgomp1-riscv64-cross libglib2.0-dev - name: checkout qemu uses: actions/checkout@v3 @@ -52,6 +52,7 @@ jobs: wget https://github.com/revyos/qemu/commit/5164bca5a4bcde4534dc1a9aa3a7f619719874cf.patch cd qemu patch -p1 < ../5164bca5a4bcde4534dc1a9aa3a7f619719874cf.patch + export CXXFLAGS="-Wno-error"; export CFLAGS="-Wno-error" ./configure --prefix=$GITHUB_WORKSPACE/qemu-install --target-list=riscv64-linux-user --disable-system make -j$(nproc) make install diff --git a/.github/workflows/codspeed-bench.yml b/.github/workflows/codspeed-bench.yml index 25e196ef2a..94e0d708ed 
100644 --- a/.github/workflows/codspeed-bench.yml +++ b/.github/workflows/codspeed-bench.yml @@ -15,7 +15,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest] + os: [ubuntu-22.04] fortran: [gfortran] build: [make] pyver: ["3.12"] @@ -147,7 +147,7 @@ jobs: OPENBLAS_NUM_THREADS=1 pytest benchmarks/bench_blas.py -k 'gesdd' - name: Run benchmarks - uses: CodSpeedHQ/action@v2 + uses: CodSpeedHQ/action@v3 with: token: ${{ secrets.CODSPEED_TOKEN }} run: | diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index da40b853f0..391183d1cd 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -23,7 +23,7 @@ jobs: python-version: "3.10" - name: Install MkDocs and doc theme packages - run: pip install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin + run: pip install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin mkdocs-mermaid2-plugin - name: Build docs site run: mkdocs build diff --git a/.github/workflows/dynamic_arch.yml b/.github/workflows/dynamic_arch.yml index 669aa81168..b388cb1b26 100644 --- a/.github/workflows/dynamic_arch.yml +++ b/.github/workflows/dynamic_arch.yml @@ -43,7 +43,9 @@ jobs: run: | if [ "$RUNNER_OS" == "Linux" ]; then sudo apt-get update - sudo apt-get install -y gfortran cmake ccache libtinfo5 + sudo apt-get install -y gfortran cmake ccache + wget http://security.ubuntu.com/ubuntu/pool/universe/n/ncurses/libtinfo5_6.3-2ubuntu0.1_amd64.deb + sudo apt install ./libtinfo5_6.3-2ubuntu0.1_amd64.deb elif [ "$RUNNER_OS" == "macOS" ]; then # It looks like "gfortran" isn't working correctly unless "gcc" is re-installed. 
brew reinstall gcc @@ -158,7 +160,7 @@ jobs: strategy: fail-fast: false matrix: - msystem: [UCRT64, MINGW32, CLANG64, CLANG32] + msystem: [UCRT64, MINGW32, CLANG64] idx: [int32, int64] build-type: [Release] include: @@ -174,14 +176,6 @@ jobs: idx: int32 target-prefix: mingw-w64-clang-x86_64 fc-pkg: fc - # Compiling with Flang 16 seems to cause test errors on machines - # with AVX512 instructions. Revisit after MSYS2 distributes Flang 17. - no-avx512-flags: -DNO_AVX512=1 - - msystem: CLANG32 - idx: int32 - target-prefix: mingw-w64-clang-i686 - fc-pkg: cc - c-lapack-flags: -DC_LAPACK=ON - msystem: UCRT64 idx: int64 idx64-flags: -DBINARY=64 -DINTERFACE64=1 @@ -192,9 +186,6 @@ jobs: idx64-flags: -DBINARY=64 -DINTERFACE64=1 target-prefix: mingw-w64-clang-x86_64 fc-pkg: fc - # Compiling with Flang 16 seems to cause test errors on machines - # with AVX512 instructions. Revisit after MSYS2 distributes Flang 17. - no-avx512-flags: -DNO_AVX512=1 - msystem: UCRT64 idx: int32 target-prefix: mingw-w64-ucrt-x86_64 @@ -203,8 +194,6 @@ jobs: exclude: - msystem: MINGW32 idx: int64 - - msystem: CLANG32 - idx: int64 defaults: run: @@ -280,8 +269,6 @@ jobs: -DNUM_THREADS=64 \ -DTARGET=CORE2 \ ${{ matrix.idx64-flags }} \ - ${{ matrix.c-lapack-flags }} \ - ${{ matrix.no-avx512-flags }} \ -DCMAKE_C_COMPILER_LAUNCHER=ccache \ -DCMAKE_Fortran_COMPILER_LAUNCHER=ccache \ .. 
@@ -369,3 +356,23 @@ jobs: - name: Build OpenBLAS run: | make -j$(nproc) HOSTCC="ccache gcc" CC="ccache ${{ matrix.triple }}-gcc" FC="ccache ${{ matrix.triple }}-gfortran" ARCH=${{ matrix.target }} ${{ matrix.opts }} + + neoverse_build: + if: "github.repository == 'OpenMathLib/OpenBLAS'" + runs-on: ubuntu-24.04-arm + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Install Dependencies + run: | + sudo apt-get update + sudo apt-get install -y gcc gfortran make + + - name: Build OpenBLAS + run: | + make -j$(nproc) TARGET=NEOVERSEN2 + make -j$(nproc) TARGET=NEOVERSEN2 lapack-test + + diff --git a/.github/workflows/harmonyos.yml b/.github/workflows/harmonyos.yml new file mode 100644 index 0000000000..118fe6300e --- /dev/null +++ b/.github/workflows/harmonyos.yml @@ -0,0 +1,37 @@ +name: harmonyos + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + build: + if: "github.repository == 'OpenMathLib/OpenBLAS'" + runs-on: ubuntu-latest + env: + OHOS_NDK_CMAKE: $GITHUB_WORKSPACE/ohos-sdk/linux/native/build-tools/cmake/bin/cmake + COMMON_CMAKE_OPTIONS: | + -DCMAKE_TOOLCHAIN_FILE=$GITHUB_WORKSPACE/ohos-sdk/linux/native/build/cmake/ohos.toolchain.cmake \ + -DCMAKE_INSTALL_PREFIX=install \ + -DCMAKE_BUILD_TYPE=Release \ + steps: + - uses: actions/checkout@v4 + - name: ndk-install + run: | + wget https://repo.huaweicloud.com/harmonyos/os/4.1.1-Release/ohos-sdk-windows_linux-public.tar.gz + tar -xf ohos-sdk-windows_linux-public.tar.gz + cd ohos-sdk/linux + unzip -q native-linux-x64-4.1.7.8-Release.zip + cd - + - name: build-armv8 + run: | + mkdir build && cd build + ${{ env.OHOS_NDK_CMAKE }} ${{ env.COMMON_CMAKE_OPTIONS }} -DOHOS_ARCH="arm64-v8a" \ + -DTARGET=ARMV8 -DNOFORTRAN=1 .. + ${{ env.OHOS_NDK_CMAKE }} --build .
-j $(nproc) + diff --git a/.github/workflows/loongarch64_clang.yml b/.github/workflows/loongarch64_clang.yml index f1a75ad343..fdb48309b9 100644 --- a/.github/workflows/loongarch64_clang.yml +++ b/.github/workflows/loongarch64_clang.yml @@ -41,7 +41,7 @@ jobs: - name: Install APT deps run: | sudo apt-get update - sudo apt-get install autoconf automake autotools-dev ninja-build make ccache + sudo apt-get install autoconf automake autotools-dev ninja-build make ccache libglib2.0-dev - name: Download and install loongarch64-toolchain run: | diff --git a/.github/workflows/mips64.yml b/.github/workflows/mips64.yml index 1491aff78b..bad7bf85e1 100644 --- a/.github/workflows/mips64.yml +++ b/.github/workflows/mips64.yml @@ -41,14 +41,14 @@ jobs: run: | sudo apt-get update sudo apt-get install autoconf automake autotools-dev ninja-build make ccache \ - gcc-${{ matrix.triple }} gfortran-${{ matrix.triple }} libgomp1-mips64el-cross + gcc-${{ matrix.triple }} gfortran-${{ matrix.triple }} libgomp1-mips64el-cross libglib2.0-dev - name: checkout qemu uses: actions/checkout@v3 with: repository: qemu/qemu path: qemu - ref: 79dfa177ae348bb5ab5f97c0915359b13d6186e2 + ref: ae35f033b874c627d81d51070187fbf55f0bf1a7 - name: build qemu run: | diff --git a/CMakeLists.txt b/CMakeLists.txt index ddff73c2cd..8e99bd208f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,11 +4,12 @@ cmake_minimum_required(VERSION 3.16.0) +set (CMAKE_ASM_SOURCE_FILE_EXTENSIONS "S") project(OpenBLAS C ASM) set(OpenBLAS_MAJOR_VERSION 0) set(OpenBLAS_MINOR_VERSION 3) -set(OpenBLAS_PATCH_VERSION 28.dev) +set(OpenBLAS_PATCH_VERSION 29.dev) set(OpenBLAS_VERSION "${OpenBLAS_MAJOR_VERSION}.${OpenBLAS_MINOR_VERSION}.${OpenBLAS_PATCH_VERSION}") diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index a6d25b50bd..d7e75bb976 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -229,3 +229,14 @@ In chronological order: * Christopher Daley * [2024-01-24] Optimize GEMV forwarding on ARM64 systems + +* Aniket P. 
Garade Sushil Pratap Singh Juliya James + * [2024-12-13] Optimized swap and rot Level-1 BLAS routines with ARM SVE + +* Annop Wongwathanarat + * [2025-01-10] Add thread throttling profile for SGEMM on NEOVERSEV1 + * [2025-01-21] Optimize gemv_t_sve_v1x3 kernel + +* Marek Michalowski + * [2025-01-21] Add thread throttling profile for SGEMV on `NEOVERSEV1` + diff --git a/Changelog.txt b/Changelog.txt index 7f89a2eab7..b52734c82c 100644 --- a/Changelog.txt +++ b/Changelog.txt @@ -1,4 +1,99 @@ OpenBLAS ChangeLog +==================================================================== +Version 0.3.29 +12-Jan-2025 + +general: + - fixed a potential NULL pointer dereference in multithreaded builds + - added function aliases for GEMMT using its new name GEMMTR adopted by Reference-BLAS + - fixed a build failure when building without LAPACK_DEPRECATED functions + - the minimum required CMake version for CMake-based builds was raised to 3.16.0 in order + to remove many compatibility and deprecation warnings + - added more detailed CMake rules for OpenMP builds (mainly to support recent LLVM) + - fixed the behavior of the recently added CBLAS_?GEMMT functions with row-major data + - improved thread scaling of multithreaded SBGEMV + - improved thread scaling of multithreaded TRTRI + - fixed compilation of the CBLAS testsuite with gcc14 (and no Fortran compiler) + - added support for option handling changes in flang-new from LLVM18 onwards + - added support for recent calling conventions changes in Cray and NVIDIA compilers + - added support for compilation with the NAG Fortran compiler + - fixed placement of the -fopenmp flag and libsuffix in the generated pkgconfig file + - improved the CMakeConfig file generated by the Makefile build + - fixed const-correctness of cblas_?geadd in cblas.h + - fixed a potential inaccuracy in multithreaded BLAS3 calls + - fixed empty implementations of get/set_affinity that print a warning in OpenMP builds + - fixed function signatures for TRTRS in 
the converted C version of LAPACK + - fixed omission of several single-precision LAPACK symbols in the shared library + - improved build instructions for the provided "pybench" benchmarks + - improved documentation, including added build instructions for WoA and HarmonyOS + as well as descriptions of environment variables that affect build and runtime behavior + - added a separate "make install_tests" target for use with cross-compilations + - integrated improvements and corrections from Reference-LAPACK: + - removed a comparison in LAPACKE ?tpmqrt that is always false (LAPACK PR 1062) + - fixed the leading dimension for B in tests for GGEV (LAPACK PR 1064) + - replaced the ?LARFT functions with a recursive implementation (LAPACK PR 1080) + +arm: + - fixed build with recent versions of the NDK (missing .type declaration of symbols) + +arm64: + - fixed a long-standing bug in the (generic) c/zgemm_beta kernel that could lead to + reads and writes outside the array bounds in some circumstances + - rewrote cpu autodetection to scan all cores and return the highest performing type + - improved the DGEMM performance for SVE targets and small matrix sizes + - improved dimension criteria for forwarding from GEMM to GEMV kernels + - added SVE kernels for ROT and SWAP + - improved SVE kernels for SGEMV and DGEMV on A64FX and NEOVERSEV1 + - added support for using the "small matrix" kernels with CMake as well + - fixed compilation on Windows on Arm + - improved compile-time detection of SVE capability + - added cpu autodetection and initial support for Apple M4 + - added support for compilation on systems running IOS + - added support for compilation on NetBSD ("evbarm" architecture) + - fixed NRM2 implementations for generic SVE targets and the Neoverse N2 + - fixed compilation for SVE-capable targets with the NVIDIA compiler + +x86_64: + - fixed a wrong storage size in the SBGEMV kernel for Cooper Lake + - added cpu autodetection for Intel Granite Rapids + - added cpu 
autodetection for AMD Ryzen 5 series + - added optimized SOMATCOPY_CT for AVX-capable targets + - fixed the fallback implementation of GEMM3M in GENERIC builds + - tentatively re-enabled builds with the EXPRECISION option + - worked around a miscompilation of tests with mingw32-gfortran14 + - added support for compilation with the Intel oneAPI 2025.0 compiler on Windows + +power: + - fixed multithreaded SBGEMM + - fixed a CMake build problem on POWER10 + - improved the performance of SGEMV + - added vectorized implementations of SBGEMV and support for forwarding 1xN SBGEMM to them + - fixed illegal instructions and potential memory overflow in SGEMM on PPCG4 + - fixed handling of NaN and Inf arguments in SSCAL and DSCAL on PPC440, G4 and 970 + - added improved CGEMM and ZGEMM kernels for POWER10 + - added Makefile logic to remove all optimization flags in DEBUG builds + +mips64: + - fixed compilation with gcc14 + - fixed GEMM parameter selection for the MIPS64_GENERIC target + - fixed a potential build failure when compiling with OpenMP + +loongarch64: + - fixed compilation for Loongson3 with recent versions of gmake + - fixed a potential loss of precision in Loongson3A GEMM + - fixed a potential build failure when compiling with OpenMP + - added optimized SOMATCOPY for LASX-capable targets + - introduced a new cpu naming scheme while retaining compatibility + - added support for cross-compiling Loongarch64 targets with CMake + - added support for compilation with LLVM + +riscv64: + - removed thread yielding overhead caused by sched_yield + - replaced some non-standard intrinsics with their official names + - fixed and sped up the implementations of CGEMM/ZGEMM TCOPY for vector lengths 128 and 256 + - improved the performance of SNRM2/DNRM2 for RVV1.0 targets + - added optimized ?OMATCOPY_CN kernels for RVV1.0 targets + ==================================================================== Version 0.3.28 8-Aug-2024 diff --git a/Makefile b/Makefile index
78f82dea59..4c72177343 100644 --- a/Makefile +++ b/Makefile @@ -426,6 +426,9 @@ dummy : install : $(MAKE) -f Makefile.install install +install_tests : + $(MAKE) -f Makefile.install install_tests + clean :: @for d in $(SUBDIRS_ALL) ; \ do if test -d $$d; then \ diff --git a/Makefile.arm64 b/Makefile.arm64 index 46e4baefc4..93c1376694 100644 --- a/Makefile.arm64 +++ b/Makefile.arm64 @@ -356,4 +356,31 @@ endif endif +else +# NVIDIA HPC options necessary to enable SVE in the compiler +ifeq ($(CORE), THUNDERX2T99) +CCOMMON_OPT += -tp=thunderx2t99 +FCOMMON_OPT += -tp=thunderx2t99 +endif +ifeq ($(CORE), NEOVERSEN1) +CCOMMON_OPT += -tp=neoverse-n1 +FCOMMON_OPT += -tp=neoverse-n1 +endif +ifeq ($(CORE), NEOVERSEV1) +CCOMMON_OPT += -tp=neoverse-v1 +FCOMMON_OPT += -tp=neoverse-v1 +endif +ifeq ($(CORE), NEOVERSEV2) +CCOMMON_OPT += -tp=neoverse-v2 +FCOMMON_OPT += -tp=neoverse-v2 +endif +ifeq ($(CORE), ARMV8SVE) +CCOMMON_OPT += -tp=neoverse-v2 +FCOMMON_OPT += -tp=neoverse-v2 +endif +ifeq ($(CORE), ARMV9SVE) +CCOMMON_OPT += -tp=neoverse-v2 +FCOMMON_OPT += -tp=neoverse-v2 +endif + endif diff --git a/Makefile.install b/Makefile.install index 129ed9a137..10e6425cce 100644 --- a/Makefile.install +++ b/Makefile.install @@ -191,22 +191,29 @@ endif #Generating OpenBLASConfig.cmake @echo Generating $(OPENBLAS_CMAKE_CONFIG) in $(DESTDIR)$(OPENBLAS_CMAKE_DIR) @echo "SET(OpenBLAS_VERSION \"${VERSION}\")" > "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" - @echo "SET(OpenBLAS_INCLUDE_DIRS ${OPENBLAS_INCLUDE_DIR})" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" + @echo "file(REAL_PATH \"../../..\" _OpenBLAS_ROOT_DIR BASE_DIRECTORY \$${CMAKE_CURRENT_LIST_DIR} )" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" + @echo "SET(OpenBLAS_INCLUDE_DIRS \$${_OpenBLAS_ROOT_DIR}/include)" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" ifneq ($(NO_SHARED),1) #ifeq logical or ifeq ($(OSNAME), $(filter $(OSNAME),Linux FreeBSD NetBSD OpenBSD DragonFly)) - 
@echo "SET(OpenBLAS_LIBRARIES ${OPENBLAS_LIBRARY_DIR}/$(LIBPREFIX)$(SYMBOLSUFFIX).so)" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" + @echo "SET(OpenBLAS_LIBRARIES \$${_OpenBLAS_ROOT_DIR}/lib/$(LIBPREFIX).so)" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" endif ifeq ($(OSNAME), $(filter $(OSNAME),WINNT CYGWIN_NT)) - @echo "SET(OpenBLAS_LIBRARIES ${OPENBLAS_BINARY_DIR}/$(LIBDLLNAME))" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" + @echo "SET(OpenBLAS_LIBRARIES \$${_OpenBLAS_ROOT_DIR}/bin/$(LIBDLLNAME))" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" endif ifeq ($(OSNAME), Darwin) - @echo "SET(OpenBLAS_LIBRARIES ${OPENBLAS_LIBRARY_DIR}/$(LIBPREFIX).dylib)" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" + @echo "SET(OpenBLAS_LIBRARIES \$${_OpenBLAS_ROOT_DIR}/lib/$(LIBPREFIX).dylib)" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" +endif + @echo "add_library(OpenBLAS::OpenBLAS SHARED IMPORTED)" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" + @echo "target_include_directories(OpenBLAS::OpenBLAS INTERFACE \$${OpenBLAS_INCLUDE_DIRS})" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" +ifeq ($(OSNAME), $(filter $(OSNAME),WINNT CYGWIN_NT)) + @echo "set_property(TARGET OpenBLAS::OpenBLAS PROPERTY IMPORTED_LOCATION \$${OpenBLAS_LIBRARIES})" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" + @echo "set_property(TARGET OpenBLAS::OpenBLAS PROPERTY IMPORTED_IMPLIB \$${_OpenBLAS_ROOT_DIR}/lib/libopenblas.lib)" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" endif else #only static - @echo "SET(OpenBLAS_LIBRARIES ${OPENBLAS_LIBRARY_DIR}/$(LIBPREFIX).$(LIBSUFFIX))" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" + @echo "SET(OpenBLAS_LIBRARIES \$${_OpenBLAS_ROOT_DIR}/lib/$(LIBPREFIX).$(LIBSUFFIX))" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" endif #Generating OpenBLASConfigVersion.cmake @echo Generating $(OPENBLAS_CMAKE_CONFIG_VERSION) in $(DESTDIR)$(OPENBLAS_CMAKE_DIR) @@ -220,3 +227,96 @@ endif @echo " endif ()" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG_VERSION)" @echo "endif ()" >>
"$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG_VERSION)" @echo Install OK! + +install_tests : lib.grd +ifneq ($(ONLY_CBLAS), 1) + @install -m 666 utest/openblas_utest $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 utest/openblas_utest_ext $(DESTDIR)$(OPENBLAS_BINARY_DIR) +ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) +ifndef NO_FBLAS +ifeq ($(BUILD_BFLOAT16),1) + @install -m 666 test/test_sbgemm $(DESTDIR)$(OPENBLAS_BINARY_DIR) +endif +ifeq ($(BUILD_SINGLE),1) + @install -m 666 test/sblat1 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/sblat2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/sblat3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/sblat2.dat $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/sblat3.dat $(DESTDIR)$(OPENBLAS_BINARY_DIR) +endif +ifeq ($(BUILD_DOUBLE),1) + @install -m 666 test/dblat1 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/dblat2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/dblat3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/dblat2.dat $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/dblat3.dat $(DESTDIR)$(OPENBLAS_BINARY_DIR) +endif +ifeq ($(BUILD_COMPLEX),1) + @install -m 666 test/cblat1 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/cblat2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/cblat3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/cblat2.dat $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/cblat3.dat $(DESTDIR)$(OPENBLAS_BINARY_DIR) +ifeq ($(ARCH), $(filter $(ARCH), x86 x86_64 ia64 MIPS)) + @install -m 666 test/cblat3_3m $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/cblat3_3m.dat $(DESTDIR)$(OPENBLAS_BINARY_DIR) +endif +endif +ifeq ($(BUILD_COMPLEX16),1) + @install -m 666 test/zblat1 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/zblat2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/zblat3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/zblat2.dat
$(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/zblat3.dat $(DESTDIR)$(OPENBLAS_BINARY_DIR) +ifeq ($(ARCH), $(filter $(ARCH), x86 x86_64 ia64 MIPS)) + @install -m 666 test/zblat3_3m $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 test/zblat3_3m.dat $(DESTDIR)$(OPENBLAS_BINARY_DIR) +endif +endif +endif +endif +ifneq ($(ONLY_CBLAS), 1) +ifeq ($(BUILD_SINGLE),1) + @install -m 666 ctest/xscblat1 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/xscblat2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/xscblat3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/sin2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/sin3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) +endif +ifeq ($(BUILD_DOUBLE),1) + @install -m 666 ctest/xdcblat1 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/xdcblat2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/xdcblat3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/din2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/din3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) +endif +ifeq ($(BUILD_COMPLEX),1) + @install -m 666 ctest/xccblat1 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/xccblat2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/xccblat3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/cin2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/cin3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) +ifeq ($(ARCH), $(filter $(ARCH), x86 x86_64 ia64 MIPS)) + @install -m 666 ctest/xccblat3_3m $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/cin3_3m $(DESTDIR)$(OPENBLAS_BINARY_DIR) +endif +endif +ifeq ($(BUILD_COMPLEX16),1) + @install -m 666 ctest/xzcblat1 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/xzcblat2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/xzcblat3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/zin2 $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/zin3 $(DESTDIR)$(OPENBLAS_BINARY_DIR) +ifeq ($(ARCH), $(filter $(ARCH), x86
x86_64 ia64 MIPS)) + @install -m 666 ctest/xzcblat3_3m $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 ctest/zin3_3m $(DESTDIR)$(OPENBLAS_BINARY_DIR) +endif +endif + +endif +ifeq ($(CPP_THREAD_SAFETY_TEST), 1) + @install -m 666 cpp_thread_test/dgemm_tester $(DESTDIR)$(OPENBLAS_BINARY_DIR) + @install -m 666 cpp_thread_test/dgemv_tester $(DESTDIR)$(OPENBLAS_BINARY_DIR) +endif +endif + diff --git a/Makefile.riscv64 b/Makefile.riscv64 index 9f6e48b7ad..0ee26c1b5c 100644 --- a/Makefile.riscv64 +++ b/Makefile.riscv64 @@ -3,7 +3,7 @@ CCOMMON_OPT += -march=rv64imafdcv0p7_zfh_xtheadc -mabi=lp64d -mtune=c920 FCOMMON_OPT += -march=rv64imafdcv0p7_zfh_xtheadc -mabi=lp64d -mtune=c920 -static endif ifeq ($(CORE), x280) -CCOMMON_OPT += -march=rv64imafdcv_zba_zbb_zfh_zvl512b -mabi=lp64d -ffast-math +CCOMMON_OPT += -march=rv64imafdcv_zba_zbb_zfh_zvl512b -mabi=lp64d FCOMMON_OPT += -march=rv64imafdcv_zba_zbb_zfh -mabi=lp64d -static endif ifeq ($(CORE), RISCV64_ZVL256B) diff --git a/Makefile.rule b/Makefile.rule index e57388844a..1472ed938b 100644 --- a/Makefile.rule +++ b/Makefile.rule @@ -3,7 +3,7 @@ # # This library's version -VERSION = 0.3.28.dev +VERSION = 0.3.29.dev # If you set this prefix, the library name will be lib$(LIBNAMESUFFIX)openblas.a # and lib$(LIBNAMESUFFIX)openblas.so, with a matching soname in the shared library diff --git a/Makefile.system b/Makefile.system index 14830eb4e2..d6dd9e9608 100644 --- a/Makefile.system +++ b/Makefile.system @@ -447,7 +447,7 @@ endif ifeq ($(OSNAME), Linux) EXTRALIB += -lm -NO_EXPRECISION = 1 +#NO_EXPRECISION = 1 endif ifeq ($(OSNAME), Android) @@ -573,7 +573,7 @@ NO_BINARY_MODE = 1 endif ifeq ($(CORE), generic) -NO_EXPRECISION = 1 +#NO_EXPRECISION = 1 endif ifndef NO_EXPRECISION @@ -596,7 +596,7 @@ endif ifeq ($(ARCH), x86_64) ifeq ($(CORE), generic) -NO_EXPRECISION = 1 +#NO_EXPRECISION = 1 endif ifndef NO_EXPRECISION @@ -832,8 +832,8 @@ BINARY_DEFINED = 1 ifeq ($(F_COMPILER), GFORTRAN) ifeq ($(C_COMPILER), GCC) -# EXPRECISION = 1
-# CCOMMON_OPT += -DEXPRECISION +EXPRECISION = 1 +CCOMMON_OPT += -DEXPRECISION endif endif endif @@ -1396,17 +1396,15 @@ endif endif ifeq ($(F_COMPILER), CRAY) -CCOMMON_OPT += -DF_INTERFACE_CRAYFC +CCOMMON_OPT += -DF_INTERFACE_INTEL FCOMMON_OPT += -hnopattern ifdef INTERFACE64 ifneq ($(INTERFACE64), 0) FCOMMON_OPT += -s integer64 endif endif -ifeq ($(USE_OPENMP), 1) -FCOMMON_OPT += -fopenmp -else -FCOMMON_OPT += -fno-openmp +ifneq ($(USE_OPENMP), 1) +FCOMMON_OPT += -O noomp endif endif diff --git a/README.md b/README.md index a31588be02..cc9325d39d 100644 --- a/README.md +++ b/README.md @@ -15,11 +15,14 @@ OSUOSL IBMZ-CI [![Build Status](http://ibmz-ci.osuosl.org/buildStatus/icon?job=O OpenBLAS is an optimized BLAS (Basic Linear Algebra Subprograms) library based on GotoBLAS2 1.13 BSD version. -Please read the documentation in the OpenBLAS folder: . +For more information about OpenBLAS, please see: + +- The documentation at [openmathlib.org/OpenBLAS/docs/](http://www.openmathlib.org/OpenBLAS/docs), +- The home page at [openmathlib.org/OpenBLAS/](http://www.openmathlib.org/OpenBLAS). For a general introduction to the BLAS routines, please refer to the extensive documentation of their reference implementation hosted at netlib: . On that site you will likewise find documentation for the reference implementation of the higher-level library LAPACK - the **L**inear **A**lgebra **Pack**age that comes included with OpenBLAS. If you are looking for a general primer or refresher on Linear Algebra, the set of six -20-minute lecture videos by Prof. Gilbert Strang on either MIT OpenCourseWare or Youtube may be helpful. +20-minute lecture videos by Prof. Gilbert Strang on either MIT OpenCourseWare [here](https://ocw.mit.edu/resources/res-18-010-a-2020-vision-of-linear-algebra-spring-2020/) or YouTube [here](https://www.youtube.com/playlist?list=PLUl4u3cNGP61iQEFiWLE21EJCxwmWvvek) may be helpful. 
## Binary Packages @@ -27,24 +30,29 @@ We provide official binary packages for the following platform: * Windows x86/x86_64 -You can download them from [file hosting on sourceforge.net](https://sourceforge.net/projects/openblas/files/) or from the Releases section of the github project page, [https://github.com/OpenMathLib/OpenBLAS/releases](https://github.com/OpenMathLib/OpenBLAS/releases). +You can download them from [file hosting on sourceforge.net](https://sourceforge.net/projects/openblas/files/) or from the [Releases section of the GitHub project page](https://github.com/OpenMathLib/OpenBLAS/releases). + +OpenBLAS is also packaged for many package managers - see [the installation section of the docs](http://www.openmathlib.org/OpenBLAS/docs/install/) for details. ## Installation from Source -Download from project homepage, https://github.com/OpenMathLib/OpenBLAS/, or check out the code -using Git from https://github.com/OpenMathLib/OpenBLAS.git. (If you want the most up to date version, be -sure to use the develop branch - master is several years out of date due to a change of maintainership.) -Buildtime parameters can be chosen in Makefile.rule, see there for a short description of each option. -Most can also be given directly on the make or cmake command line. +Obtain the source code from https://github.com/OpenMathLib/OpenBLAS/. Note that the default branch +is `develop` (a `master` branch is still present, but far out of date). + +Build-time parameters can be chosen in `Makefile.rule`, see there for a short description of each option. +Most options can also be given directly on the command line as parameters to your `make` or `cmake` invocation. ### Dependencies Building OpenBLAS requires the following to be installed: * GNU Make or CMake -* A C compiler, e.g. GCC or Clang +* A C compiler, e.g. GCC or Clang * A Fortran compiler (optional, for LAPACK) +In general, using a recent version of the compiler is strongly recommended. 
+If a Fortran compiler is not available, it is possible to compile an older version of the included LAPACK +that has been machine-translated to C. ### Normal compile @@ -60,6 +68,9 @@ For building with `cmake`, the usual conventions apply, i.e. create a build dire OpenBLAS source directory or separate from it, and invoke `cmake` there with the path to the source tree and any build options you plan to set. +For more details, see the [Building from source](http://www.openmathlib.org/OpenBLAS/docs/install/#building-from-source) +section in the docs. + ### Cross compile Set `CC` and `FC` to point to the cross toolchains, and if you use `make`, also set `HOSTCC` to your host C compiler. @@ -76,10 +87,12 @@ Examples: make CC="i686-w64-mingw32-gcc -Bstatic" FC="i686-w64-mingw32-gfortran -static-libgfortran" TARGET=HASWELL BINARY=32 CROSS=1 NUM_THREADS=20 CONSISTENT_FPCSR=1 HOSTCC=gcc ``` -You can find instructions for other cases both in the "Supported Systems" section below and in the docs folder. The .yml scripts included with the sources (which contain the +You can find instructions for other cases both in the "Supported Systems" section below and in +the [Building from source docs](http://www.openmathlib.org/OpenBLAS/docs/install). +The `.yml` scripts included with the sources (which contain the build scripts for the "continuous integration" (CI) build tests automatically run on every proposed change to the sources) may also provide additional hints. -When compiling for a more modern CPU TARGET of the same architecture, e.g. TARGET=SKYLAKEX on a HASWELL host, option "CROSS=1" can be used to suppress the automatic invocation of the tests at the end of the build. +When compiling for a more modern CPU target of the same architecture, e.g. `TARGET=SKYLAKEX` on a `HASWELL` host, option `CROSS=1` can be used to suppress the automatic invocation of the tests at the end of the build. ### Debug version @@ -325,11 +338,14 @@ Please see Changelog.txt. 
## Troubleshooting -* Please read the [FAQ](https://github.com/OpenMathLib/OpenBLAS/docs/faq,md) in the docs folder first. +* Please read the [FAQ](http://www.openmathlib.org/OpenBLAS/docs/faq) section of the docs first. * Please use GCC version 4.6 and above to compile Sandy Bridge AVX kernels on Linux/MinGW/BSD. * Please use Clang version 3.1 and above to compile the library on Sandy Bridge microarchitecture. Clang 3.0 will generate the wrong AVX binary code. -* Please use GCC version 6 or LLVM version 6 and above to compile Skylake AVX512 kernels. +* Please use GCC version 6 or LLVM version 6 and above to compile Skylake/CooperLake AVX512 kernels +* Please use LLVM version 18 and above (version 19 and above on Windows) if you plan to use + its new flang compiler for Fortran +* Please use GCC version 11 and above to compile OpenBLAS on the POWER architecture * The number of CPUs/cores should be less than or equal to 256. On Linux `x86_64` (`amd64`), there is experimental support for up to 1024 CPUs/cores and 128 numa nodes if you build the library with `BIGNUMA=1`. @@ -350,4 +366,4 @@ Please see Changelog.txt. ## Donation -Please read [this wiki page](https://github.com/xianyi/OpenBLAS/wiki/Donation). +Please see [the donations section](http://www.openmathlib.org/OpenBLAS/docs/about/#donations) in the docs. 
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 8c5b1e5bb2..0bdf4e3167 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -141,7 +141,7 @@ jobs: - job: OSX_OpenMP pool: - vmImage: 'macOS-12' + vmImage: 'macOS-13' steps: - script: | brew update @@ -151,7 +151,7 @@ jobs: - job: OSX_GCC_Nothreads pool: - vmImage: 'macOS-12' + vmImage: 'macOS-13' steps: - script: | brew update @@ -195,7 +195,7 @@ jobs: - job: OSX_dynarch_cmake pool: - vmImage: 'macOS-12' + vmImage: 'macOS-13' variables: LD_LIBRARY_PATH: /usr/local/opt/llvm/lib LIBRARY_PATH: /usr/local/opt/llvm/lib @@ -242,7 +242,7 @@ jobs: - job: OSX_NDK_ARMV7 pool: - vmImage: 'macOS-12' + vmImage: 'macOS-13' steps: - script: | brew update @@ -252,7 +252,7 @@ jobs: - job: OSX_IOS_ARMV8 pool: - vmImage: 'macOS-12' + vmImage: 'macOS-13' variables: CC: /Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang CFLAGS: -O2 -Wno-macro-redefined -isysroot /Applications/Xcode_14.2.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS16.2.sdk -arch arm64 -miphoneos-version-min=10.0 @@ -262,7 +262,7 @@ jobs: - job: OSX_IOS_ARMV7 pool: - vmImage: 'macOS-12' + vmImage: 'macOS-13' variables: CC: /Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang CFLAGS: -O2 -mno-thumb -Wno-macro-redefined -isysroot /Applications/Xcode_14.2.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS16.2.sdk -arch armv7 -miphoneos-version-min=5.1 @@ -272,7 +272,7 @@ jobs: - job: OSX_xbuild_DYNAMIC_ARM64 pool: - vmImage: 'macOS-12' + vmImage: 'macOS-13' variables: CC: /Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang CFLAGS: -O2 -Wno-macro-redefined -isysroot /Applications/Xcode_14.2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX13.1.sdk -arch arm64 diff --git a/benchmark/pybench/README.md b/benchmark/pybench/README.md index 
7523ca75ab..43c1b3665d 100644 --- a/benchmark/pybench/README.md +++ b/benchmark/pybench/README.md @@ -43,7 +43,17 @@ have all what it takes to build OpenBLAS from source, plus `python` and $ python -mpip install numpy meson ninja pytest pytest-benchmark ``` -The benchmark syntax is consistent with that of `pytest-benchmark` framework. The incantation to run the suite locally is `$ pytest benchmark/pybench/benchmarks/test_blas.py`. +The Meson build system looks for the installed OpenBLAS using pkgconfig, so the openblas.pc created during the OpenBLAS build needs +to be somewhere on the search path of pkgconfig or in a folder pointed to by the environment variable PKG_CONFIG_PATH. + +If you want to build the benchmark suite using flang (or flang-new) instead of gfortran for the Fortran parts, you currently need +to edit the meson.build file and change the line `'fortran_std=legacy'` to `'fortran_std=none'` to work around an incompatibility +between Meson and flang. + +If you are building and running the benchmark under MS Windows, it may be necessary to copy the generated openblas_wrap module from +your build folder to the `benchmarks` folder. + +The benchmark syntax is consistent with that of `pytest-benchmark` framework. The incantation to run the suite locally is `$ pytest benchmark/pybench/benchmarks/bench_blas.py`. An ASV compatible benchmark suite is planned but currently not implemented. 
diff --git a/c_check b/c_check index c2b52c81b0..c3c2901712 100755 --- a/c_check +++ b/c_check @@ -6,6 +6,9 @@ hostarch=`uname -m | sed -e 's/i.86/x86/'` if [ "$hostos" = "AIX" ] || [ "$hostos" = "SunOS" ]; then hostarch=`uname -p` fi +if [ "$hostarch" = "evbarm" ]; then + hostarch=`uname -p` +fi case "$hostarch" in amd64) hostarch=x86_64 ;; arm*) [ "$hostarch" = "arm64" ] || hostarch='arm' ;; diff --git a/cmake/f_check.cmake b/cmake/f_check.cmake index 4c4f5ac044..3f713807ea 100644 --- a/cmake/f_check.cmake +++ b/cmake/f_check.cmake @@ -45,13 +45,15 @@ if (NOT ONLY_CBLAS) # TODO: detect whether underscore needed, set #defines and BU appropriately - use try_compile # TODO: set FEXTRALIB flags a la f_check? - + if (NOT (${CMAKE_SYSTEM_NAME} MATCHES "Windows" AND x${CMAKE_Fortran_COMPILER_ID} MATCHES "IntelLLVM")) set(BU "_") file(APPEND ${TARGET_CONF_TEMP} "#define BUNDERSCORE _\n" "#define NEEDBUNDERSCORE 1\n" "#define NEED2UNDERSCORES 0\n") - + else () + set (FCOMMON_OPT "${FCOMMON_OPT} /fp:precise /recursive /names:lowercase /assume:nounderscore") + endif() else () #When we only build CBLAS, we set NOFORTRAN=2 diff --git a/cmake/fc.cmake b/cmake/fc.cmake index 4ce1c99d4b..38bd406a3a 100644 --- a/cmake/fc.cmake +++ b/cmake/fc.cmake @@ -269,6 +269,31 @@ if (${F_COMPILER} STREQUAL "CRAY") endif () endif () +if (${F_COMPILER} STREQUAL "NAGFOR") + set(CCOMMON_OPT "${CCOMMON_OPT} -DF_INTERFACE_NAG") + if (INTERFACE64) + set(FCOMMON_OPT "${FCOMMON_OPT} -i8") + endif () + # Options from Makefile.system + # -dcfuns: Enable non-standard double precision complex intrinsic functions + # -ieee=full: enables all IEEE arithmetic facilities including non-stop arithmetic. + # -w=obs: Suppress warning messages about obsolescent features + # -thread_safe: Compile code for safe execution in a multi-threaded environment. + # -recursive: Specifies that procedures are RECURSIVE by default. 
+ set(FCOMMON_OPT "${FCOMMON_OPT} -dcfuns -recursive -ieee=full -w=obs -thread_safe") + # Options from Reference-LAPACK + # Suppress compiler banner and summary + set(FCOMMON_OPT "${FCOMMON_OPT} -quiet") + # Disable other common warnings + # -w=x77: Suppress warning messages about Fortran 77 features + # -w=ques: Suppress warning messages about questionable usage + # -w=unused: Suppress warning messages about unused variables + set(FCOMMON_OPT "${FCOMMON_OPT} -w=x77 -w=ques -w=unused") + if (USE_OPENMP) + set(FCOMMON_OPT "${FCOMMON_OPT} -openmp") + endif () +endif () + # from the root Makefile - this is for lapack-netlib to compile the correct secnd file. if (${F_COMPILER} STREQUAL "GFORTRAN") set(TIMER "INT_ETIME") diff --git a/cmake/kernel.cmake b/cmake/kernel.cmake index efededcf36..2cea6d9e6e 100644 --- a/cmake/kernel.cmake +++ b/cmake/kernel.cmake @@ -79,6 +79,9 @@ macro(SetDefaultL1) SetFallback(CROTKERNEL zrot.S) SetFallback(ZROTKERNEL zrot.S) SetFallback(XROTKERNEL zrot.S) + SetFallback(SROTMKERNEL rotm.S) + SetFallback(DROTMKERNEL rotm.S) + SetFallback(QROTMKERNEL rotm.S) SetFallback(SSCALKERNEL scal.S) SetFallback(DSCALKERNEL scal.S) SetFallback(CSCALKERNEL zscal.S) diff --git a/cmake/lapack.cmake b/cmake/lapack.cmake index 003a8b3c17..6a74fb7640 100644 --- a/cmake/lapack.cmake +++ b/cmake/lapack.cmake @@ -1018,7 +1018,12 @@ foreach (LA_FILE ${LA_GEN_SRC}) endforeach () if (NOT C_LAPACK) - set_source_files_properties(${LA_SOURCES} PROPERTIES COMPILE_FLAGS "${LAPACK_FFLAGS}") + # The below line is duplicating Fortran flags but NAG has a few flags + # that cannot be specified twice. 
It's possible this is not needed for + # any compiler, but for safety, we only turn off for NAG + if (NOT ${F_COMPILER} STREQUAL "NAGFOR") + set_source_files_properties(${LA_SOURCES} PROPERTIES COMPILE_FLAGS "${LAPACK_FFLAGS}") + endif () if (${F_COMPILER} STREQUAL "GFORTRAN") set_source_files_properties(${LA_SOURCES} PROPERTIES COMPILE_FLAGS "${LAPACK_FFLAGS} -fno-tree-vectorize") endif() diff --git a/cmake/prebuild.cmake b/cmake/prebuild.cmake index f6ca73b7b6..c8adf4ab2f 100644 --- a/cmake/prebuild.cmake +++ b/cmake/prebuild.cmake @@ -58,7 +58,7 @@ set(TARGET_CONF_TEMP "${PROJECT_BINARY_DIR}/${TARGET_CONF}.tmp") # c_check set(FU "") -if (APPLE OR (MSVC AND NOT ${CMAKE_C_COMPILER_ID} MATCHES "Clang")) +if (APPLE OR (MSVC AND NOT (${CMAKE_C_COMPILER_ID} MATCHES "Clang" OR ${CMAKE_C_COMPILER_ID} MATCHES "IntelLLVM"))) set(FU "_") endif() if(MINGW AND NOT MINGW64) @@ -1433,7 +1433,9 @@ else(NOT CMAKE_CROSSCOMPILING) message(STATUS "MSVC") set(GETARCH_FLAGS ${GETARCH_FLAGS} -DFORCE_GENERIC) else() - list(APPEND GETARCH_SRC ${PROJECT_SOURCE_DIR}/cpuid.S) + if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Darwin") + list(APPEND GETARCH_SRC ${PROJECT_SOURCE_DIR}/cpuid.S) + endif() if (DEFINED TARGET_CORE) set(GETARCH_FLAGS ${GETARCH_FLAGS} -DFORCE_${TARGET_CORE}) endif () diff --git a/cmake/system_check.cmake b/cmake/system_check.cmake index 59a1358789..fc81e9797d 100644 --- a/cmake/system_check.cmake +++ b/cmake/system_check.cmake @@ -10,6 +10,10 @@ if (${HOST_OS} STREQUAL "WINDOWS") set(HOST_OS WINNT) endif () +if (${HOST_OS} STREQUAL "IOS") + set(HOST_OS DARWIN) +endif () + if (${HOST_OS} STREQUAL "LINUX") # check if we're building natively on Android (TERMUX) EXECUTE_PROCESS( COMMAND uname -o COMMAND tr -d '\n' OUTPUT_VARIABLE OPERATING_SYSTEM) diff --git a/cmake/utils.cmake b/cmake/utils.cmake index 9befc9a3c4..a93f21686f 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -16,6 +16,14 @@ endfunction () macro(ParseMakefileVars MAKEFILE_IN) message(STATUS "Reading vars 
from ${MAKEFILE_IN}...") set (C_COMPILER ${CMAKE_C_COMPILER_ID}) + set (OSNAME ${CMAKE_SYSTEM_NAME}) + if (${C_COMPILER} MATCHES Clang) + set (C_COMPILER CLANG) + endif () + if (${OSNAME} STREQUAL Windows) + set (OSNAME WINNT) + endif () +message(STATUS OS ${OSNAME} COMPILER ${C_COMPILER}) set (IfElse 0) set (ElseSeen 0) set (SkipIfs 0) diff --git a/common.h b/common.h index 766b89cf74..8d002c4aa0 100644 --- a/common.h +++ b/common.h @@ -372,6 +372,12 @@ typedef int blasint; #endif #endif +#if defined(ARCH_RISCV64) +#ifndef YIELDING +#define YIELDING __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop;\n"); +#endif +#endif + #ifdef __EMSCRIPTEN__ #define YIELDING diff --git a/common_arm.h b/common_arm.h index 80aabc7b02..d6291018b1 100644 --- a/common_arm.h +++ b/common_arm.h @@ -102,9 +102,16 @@ static inline int blas_quickdivide(blasint x, blasint y){ #if defined(ASSEMBLER) && !defined(NEEDPARAM) +#if !defined(__APPLE__) && !defined(_WIN32) +#define OPENBLAS_ARM_TYPE_FUNCTION .type REALNAME, %function ; +#else +#define OPENBLAS_ARM_TYPE_FUNCTION +#endif + #define PROLOGUE \ .arm ;\ .global REALNAME ;\ + OPENBLAS_ARM_TYPE_FUNCTION \ REALNAME: #define EPILOGUE diff --git a/common_d.h b/common_d.h index 6f4bb2dedc..1e8c33d7a3 100644 --- a/common_d.h +++ b/common_d.h @@ -22,6 +22,7 @@ #define DSUM_K dsum_k #define DSWAP_K dswap_k #define DROT_K drot_k +#define DROTM_K drotm_k #define DGEMV_N dgemv_n #define DGEMV_T dgemv_t @@ -180,6 +181,7 @@ #define DSUM_K gotoblas -> dsum_k #define DSWAP_K gotoblas -> dswap_k #define DROT_K gotoblas -> drot_k +#define DROTM_K gotoblas -> drotm_k #define DGEMV_N gotoblas -> dgemv_n #define DGEMV_T gotoblas -> dgemv_t diff --git a/common_level1.h b/common_level1.h index d2ed47e567..85b39f7a7c 100644 --- a/common_level1.h +++ b/common_level1.h @@ -213,9 +213,9 @@ int srotmg_k(float *, float *, float *, float *, float *); int drotmg_k(double *, double *, double *, double *, double *); int qrotmg_k(xdouble *, xdouble *, xdouble *, 
xdouble *, xdouble *); -int srotm_k (BLASLONG, float, BLASLONG, float, BLASLONG, float); -int drotm_k (BLASLONG, double, BLASLONG, double, BLASLONG, double); -int qrotm_k (BLASLONG, xdouble, BLASLONG, xdouble, BLASLONG, xdouble); +int srotm_k (BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); +int drotm_k (BLASLONG, double *, BLASLONG, double *, BLASLONG, double *); +int qrotm_k (BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *); int saxpby_k (BLASLONG, float, float *, BLASLONG, float, float *, BLASLONG); diff --git a/common_macro.h b/common_macro.h index a924651de2..820cb472a6 100644 --- a/common_macro.h +++ b/common_macro.h @@ -70,6 +70,7 @@ #define SUM_K QSUM_K #define SWAP_K QSWAP_K #define ROT_K QROT_K +#define ROTM_K QROTM_K #define GEMV_N QGEMV_N #define GEMV_T QGEMV_T @@ -361,6 +362,7 @@ #define SUM_K DSUM_K #define SWAP_K DSWAP_K #define ROT_K DROT_K +#define ROTM_K DROTM_K #define GEMV_N DGEMV_N #define GEMV_T DGEMV_T @@ -977,6 +979,7 @@ #define SUM_K SSUM_K #define SWAP_K SSWAP_K #define ROT_K SROT_K +#define ROTM_K SROTM_K #define GEMV_N SGEMV_N #define GEMV_T SGEMV_T diff --git a/common_param.h b/common_param.h index e1a87f9693..d4d5a8eb27 100644 --- a/common_param.h +++ b/common_param.h @@ -77,6 +77,7 @@ BLASLONG (*isbmin_k) (BLASLONG, float *, BLASLONG); double (*dsbdot_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG); int (*sbrot_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG, float, float); + int (*sbrotm_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); int (*sbaxpy_k) (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG); int (*sbscal_k) (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG); @@ -197,6 +198,7 @@ BLASLONG (*ismin_k) (BLASLONG, float *, BLASLONG); //double (*dsdot_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG); int (*srot_k) (BLASLONG, float *, BLASLONG, float *, BLASLONG, float, float); + int (*srotm_k) 
(BLASLONG, float *, BLASLONG, float *, BLASLONG, float *); #endif #if (BUILD_SINGLE==1) || (BUILD_DOUBLE==1) || (BUILD_COMPLEX==1) int (*saxpy_k) (BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG); @@ -336,6 +338,7 @@ BLASLONG (*idmin_k) (BLASLONG, double *, BLASLONG); #endif #if (BUILD_DOUBLE==1) || (BUILD_COMPLEX16==1) int (*drot_k) (BLASLONG, double *, BLASLONG, double *, BLASLONG, double, double); + int (*drotm_k) (BLASLONG, double *, BLASLONG, double *, BLASLONG, double *); int (*daxpy_k) (BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG); int (*dscal_k) (BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG); int (*dswap_k) (BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG); @@ -445,6 +448,7 @@ BLASLONG (*iqmin_k) (BLASLONG, xdouble *, BLASLONG); int (*qcopy_k) (BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG); xdouble (*qdot_k) (BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG); int (*qrot_k) (BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble, xdouble); + int (*qrotm_k) (BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *); int (*qaxpy_k) (BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG); int (*qscal_k) (BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG); diff --git a/common_q.h b/common_q.h index b4ace3a628..1d976f1e8d 100644 --- a/common_q.h +++ b/common_q.h @@ -22,6 +22,7 @@ #define QSUM_K qsum_k #define QSWAP_K qswap_k #define QROT_K qrot_k +#define QROTM_K qrotm_k #define QGEMV_N qgemv_n #define QGEMV_T qgemv_t @@ -165,6 +166,7 @@ #define QSUM_K gotoblas -> qsum_k #define QSWAP_K gotoblas -> qswap_k #define QROT_K gotoblas -> qrot_k +#define QROTM_K gotoblas -> qrotm_k #define QGEMV_N gotoblas -> qgemv_n #define QGEMV_T gotoblas -> qgemv_t diff 
--git a/common_s.h b/common_s.h index af9d940ae1..1dede1e365 100644 --- a/common_s.h +++ b/common_s.h @@ -24,6 +24,7 @@ #define SSCAL_K sscal_k #define SSWAP_K sswap_k #define SROT_K srot_k +#define SROTM_K srotm_k #define SGEMV_N sgemv_n #define SGEMV_T sgemv_t @@ -189,6 +190,7 @@ #define SSCAL_K gotoblas -> sscal_k #define SSWAP_K gotoblas -> sswap_k #define SROT_K gotoblas -> srot_k +#define SROTM_K gotoblas -> srotm_k #define SGEMV_N gotoblas -> sgemv_n #define SGEMV_T gotoblas -> sgemv_t diff --git a/cpuid_arm64.c b/cpuid_arm64.c index aaf5084395..20dbead23e 100644 --- a/cpuid_arm64.c +++ b/cpuid_arm64.c @@ -25,6 +25,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +#include #include #ifdef __APPLE__ #include @@ -33,6 +34,23 @@ size_t length=sizeof(value); int64_t value64; size_t length64=sizeof(value64); #endif +#if (defined OS_LINUX || defined OS_ANDROID) +#include +#include +#ifndef HWCAP_CPUID +#define HWCAP_CPUID (1 << 11) +#endif +#ifndef HWCAP_SVE +#define HWCAP_SVE (1 << 22) +#endif +#if (defined OS_WINDOWS) +#include +#endif + +#define get_cpu_ftr(id, var) ({ \ + __asm__ __volatile__ ("mrs %0, "#id : "=r" (var)); \ + }) +#endif #define CPU_UNKNOWN 0 #define CPU_ARMV8 1 @@ -42,11 +60,11 @@ size_t length64=sizeof(value64); #define CPU_CORTEXA57 3 #define CPU_CORTEXA72 4 #define CPU_CORTEXA73 5 -#define CPU_CORTEXA76 23 +#define CPU_CORTEXA76 23 #define CPU_NEOVERSEN1 11 #define CPU_NEOVERSEV1 16 #define CPU_NEOVERSEN2 17 -#define CPU_NEOVERSEV2 24 +#define CPU_NEOVERSEV2 24 #define CPU_CORTEXX1 18 #define CPU_CORTEXX2 19 #define CPU_CORTEXA510 20 @@ -93,7 +111,7 @@ static char *cpuname[] = { "CORTEXA710", "FT2000", "CORTEXA76", - "NEOVERSEV2" + "NEOVERSEV2" }; static char *cpuname_lower[] = { @@ -121,13 +139,17 @@ static char *cpuname_lower[] = { "cortexa710", "ft2000", "cortexa76", - "neoversev2" + "neoversev2" }; +static int cpulowperf=0; 
+static int cpumidperf=0; +static int cpuhiperf=0; + int get_feature(char *search) { -#ifdef __linux +#if defined( __linux ) || defined( __NetBSD__ ) FILE *infile; char buffer[2048], *p,*t; p = (char *) NULL ; @@ -158,33 +180,108 @@ int get_feature(char *search) #endif return(0); } - +static int cpusort(const void *model1, const void *model2) +{ + return (*(int*)model2-*(int*)model1); +} int detect(void) { -#ifdef __linux - +#if defined( __linux ) || defined( __NetBSD__ ) + int n,i,ii; + int midr_el1; + int implementer; + int cpucap[1024]; + int cpucores[1024]; FILE *infile; - char buffer[512], *p, *cpu_part = NULL, *cpu_implementer = NULL; + char cpupart[6],cpuimpl[6]; + char *cpu_impl=NULL,*cpu_pt=NULL; + char buffer[2048], *p, *cpu_part = NULL, *cpu_implementer = NULL; p = (char *) NULL ; - - infile = fopen("/proc/cpuinfo", "r"); - while (fgets(buffer, sizeof(buffer), infile)) { - if ((cpu_part != NULL) && (cpu_implementer != NULL)) { - break; + cpulowperf=cpumidperf=cpuhiperf=0; + for (i=0;i<1024;i++)cpucores[i]=0; + n=0; + infile = fopen("/sys/devices/system/cpu/possible", "r"); + if (!infile) { + infile = fopen("/proc/cpuinfo", "r"); + while (fgets(buffer, sizeof(buffer), infile)) { + if (!strncmp("processor", buffer, 9)) + n++; } - - if ((cpu_part == NULL) && !strncmp("CPU part", buffer, 8)) { - cpu_part = strchr(buffer, ':') + 2; - cpu_part = strdup(cpu_part); - } else if ((cpu_implementer == NULL) && !strncmp("CPU implementer", buffer, 15)) { - cpu_implementer = strchr(buffer, ':') + 2; - cpu_implementer = strdup(cpu_implementer); + } else { + fgets(buffer, sizeof(buffer), infile); + sscanf(buffer,"0-%d",&n); + n++; + } + fclose(infile); + + cpu_implementer=NULL; + for (i=0;i= 0xd4b) cpuhiperf++; + else + if (cpucores[ii] >= 0xd07) cpumidperf++; + else cpulowperf++; + } + else cpulowperf++; + } + fclose(infile); + break; + } else { + (void)fgets(buffer, sizeof(buffer), infile); + midr_el1=strtoul(buffer,NULL,16); + fclose(infile); + implementer = (midr_el1 
>> 24) & 0xFF; + cpucores[i] = (midr_el1 >> 4) & 0xFFF; + sprintf(buffer,"/sys/devices/system/cpu/cpu%d/cpu_capacity",i); + infile= fopen(buffer,"r"); + if (!infile) { + if (implementer== 65) { + if (cpucores[i] >= 0xd4b) cpuhiperf++; + else + if (cpucores[i] >= 0xd07) cpumidperf++; + else cpulowperf++; + } + else cpulowperf++; + } else { + (void)fgets(buffer, sizeof(buffer), infile); + sscanf(buffer,"%d",&cpucap[i]); + if (cpucap[i] >= 1000) cpuhiperf++; + else + if (cpucap[i] >= 500) cpumidperf++; + else cpulowperf++; + fclose(infile); + } } + sprintf(cpuimpl,"0x%2x",implementer); + cpu_implementer=strdup(cpuimpl); } - - fclose(infile); + qsort(cpucores,1024,sizeof(int),cpusort); + sprintf(cpupart,"0x%3x",cpucores[0]); + cpu_part=strdup(cpupart); if(cpu_part != NULL && cpu_implementer != NULL) { // Arm if (strstr(cpu_implementer, "0x41")) { @@ -219,7 +316,7 @@ int detect(void) else if (strstr(cpu_part, "0xd4f")) //NVIDIA Grace et al. return CPU_NEOVERSEV2; else if (strstr(cpu_part, "0xd0b")) - return CPU_CORTEXA76; + return CPU_CORTEXA76; } // Qualcomm else if (strstr(cpu_implementer, "0x51") && strstr(cpu_part, "0xc00")) @@ -277,11 +374,42 @@ int detect(void) } #else #ifdef __APPLE__ + sysctlbyname("hw.ncpu",&value64,&length64,NULL,0); + cpulowperf=value64; + sysctlbyname("hw.nperflevels",&value64,&length64,NULL,0); + if (value64 > 1) { + sysctlbyname("hw.perflevel0.cpusperl",&value64,&length64,NULL,0); + cpuhiperf=value64; + sysctlbyname("hw.perflevel1.cpusperl",&value64,&length64,NULL,0); + cpulowperf=value64; + } sysctlbyname("hw.cpufamily",&value64,&length64,NULL,0); if (value64 ==131287967|| value64 == 458787763 ) return CPU_VORTEX; //A12/M1 if (value64 == 3660830781) return CPU_VORTEX; //A15/M2 - if (value64 == 2271604202) return CPU_VORTEX; //A16/M3 - if (value64 == 1867590060) return CPU_VORTEX; //M4 + if (value64 == 2271604202) return CPU_VORTEX; //A16/M3 + if (value64 == 1867590060) return CPU_VORTEX; //M4 +#else +#ifdef OS_WINDOWS + HKEY reghandle; + 
HKEY hklm = HKEY_LOCAL_MACHINE; + WCHAR valstring[512]; + PVOID pvalstring=valstring; + DWORD size=sizeof (valstring); + DWORD type=RRF_RT_ANY; + DWORD flags=0; + LPCWSTR subkey= L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0"; + LPCWSTR field=L"ProcessorNameString"; + LONG errcode=RegOpenKeyEx(HKEY_LOCAL_MACHINE,TEXT("Hardware\\Description\\System\\CentralProcessor\\0"), 0, KEY_READ, ®handle); + if (errcode != NO_ERROR) wprintf(L"Could not open registry key for proc0: %x\n",errcode); + errcode=RegQueryValueEx(reghandle, "ProcessorNameString", NULL,NULL ,pvalstring,&size); + if (errcode != ERROR_SUCCESS) wprintf(L"Error reading cpuname from registry:%x\n",errcode); +//wprintf(stderr,L"%s\n",(PWSTR)valstring); + RegCloseKey(reghandle); + if (strstr(valstring, "Snapdragon(R) X Elite")) return CPU_NEOVERSEN1; + if (strstr(valstring, "Ampere(R) Altra")) return CPU_NEOVERSEN1; + if (strstr(valstring, "Snapdragon (TM) 8cx Gen 3")) return CPU_CORTEXX1; + if (strstr(valstring, "Snapdragon Compute Platform")) return CPU_CORTEXX1; +#endif #endif return CPU_ARMV8; #endif @@ -314,7 +442,7 @@ void get_cpucount(void) { int n=0; -#ifdef __linux +#if defined( __linux ) || defined( __NetBSD__ ) FILE *infile; char buffer[2048], *p,*t; p = (char *) NULL ; @@ -331,10 +459,22 @@ int n=0; fclose(infile); printf("#define NUM_CORES %d\n",n); + if (cpulowperf >0) + printf("#define NUM_CORES_LP %d\n",cpulowperf); + if (cpumidperf >0) + printf("#define NUM_CORES_MP %d\n",cpumidperf); + if (cpuhiperf >0) + printf("#define NUM_CORES_HP %d\n",cpuhiperf); #endif #ifdef __APPLE__ sysctlbyname("hw.physicalcpu_max",&value,&length,NULL,0); printf("#define NUM_CORES %d\n",value); + if (cpulowperf >0) + printf("#define NUM_CORES_LP %d\n",cpulowperf); + if (cpumidperf >0) + printf("#define NUM_CORES_MP %d\n",cpumidperf); + if (cpuhiperf >0) + printf("#define NUM_CORES_HP %d\n",cpuhiperf); #endif } @@ -347,7 +487,6 @@ void get_cpuconfig(void) printf("#define ARMV8\n"); printf("#define 
HAVE_NEON\n"); // This shouldn't be necessary printf("#define HAVE_VFPV4\n"); // This shouldn't be necessary - int d = detect(); switch (d) { @@ -402,8 +541,8 @@ void get_cpuconfig(void) break; case CPU_NEOVERSEV1: - printf("#define HAVE_SVE 1\n"); - case CPU_CORTEXA76: + printf("#define HAVE_SVE 1\n"); + case CPU_CORTEXA76: printf("#define %s\n", cpuname[d]); printf("#define L1_CODE_SIZE 65536\n"); printf("#define L1_CODE_LINESIZE 64\n"); @@ -431,32 +570,32 @@ void get_cpuconfig(void) printf("#define L2_ASSOCIATIVE 8\n"); printf("#define DTB_DEFAULT_ENTRIES 48\n"); printf("#define DTB_SIZE 4096\n"); - printf("#define HAVE_SVE 1\n"); + printf("#define HAVE_SVE 1\n"); break; - case CPU_NEOVERSEV2: + case CPU_NEOVERSEV2: printf("#define ARMV9\n"); - printf("#define HAVE_SVE 1\n"); - printf("#define %s\n", cpuname[d]); - printf("#define L1_CODE_SIZE 65536\n"); - printf("#define L1_CODE_LINESIZE 64\n"); - printf("#define L1_CODE_ASSOCIATIVE 4\n"); - printf("#define L1_DATA_SIZE 65536\n"); - printf("#define L1_DATA_LINESIZE 64\n"); - printf("#define L1_DATA_ASSOCIATIVE 4\n"); - printf("#define L2_SIZE 1048576\n"); - printf("#define L2_LINESIZE 64\n"); - printf("#define L2_ASSOCIATIVE 8\n"); - // L1 Data TLB = 48 entries - // L2 Data TLB = 2048 entries - printf("#define DTB_DEFAULT_ENTRIES 48\n"); - printf("#define DTB_SIZE 4096\n"); // Set to 4096 for symmetry with other configs. 
- break; + printf("#define HAVE_SVE 1\n"); + printf("#define %s\n", cpuname[d]); + printf("#define L1_CODE_SIZE 65536\n"); + printf("#define L1_CODE_LINESIZE 64\n"); + printf("#define L1_CODE_ASSOCIATIVE 4\n"); + printf("#define L1_DATA_SIZE 65536\n"); + printf("#define L1_DATA_LINESIZE 64\n"); + printf("#define L1_DATA_ASSOCIATIVE 4\n"); + printf("#define L2_SIZE 1048576\n"); + printf("#define L2_LINESIZE 64\n"); + printf("#define L2_ASSOCIATIVE 8\n"); + // L1 Data TLB = 48 entries + // L2 Data TLB = 2048 entries + printf("#define DTB_DEFAULT_ENTRIES 48\n"); + printf("#define DTB_SIZE 4096\n"); // Set to 4096 for symmetry with other configs. + break; case CPU_CORTEXA510: case CPU_CORTEXA710: case CPU_CORTEXX1: case CPU_CORTEXX2: printf("#define ARMV9\n"); - printf("#define HAVE_SVE 1\n"); + printf("#define HAVE_SVE 1\n"); printf("#define %s\n", cpuname[d]); printf("#define L1_CODE_SIZE 65536\n"); printf("#define L1_CODE_LINESIZE 64\n"); @@ -559,8 +698,6 @@ void get_cpuconfig(void) case CPU_VORTEX: printf("#define VORTEX \n"); #ifdef __APPLE__ - sysctlbyname("hw.cpufamily",&value64,&length64,NULL,0); - if (value64 == 1867590060) printf("#define HAVE_SME 1\n");; //M4 sysctlbyname("hw.l1icachesize",&value64,&length64,NULL,0); printf("#define L1_CODE_SIZE %lld \n",value64); sysctlbyname("hw.cachelinesize",&value64,&length64,NULL,0); @@ -575,7 +712,7 @@ void get_cpuconfig(void) break; case CPU_A64FX: printf("#define A64FX\n"); - printf("#define HAVE_SVE 1\n"); + printf("#define HAVE_SVE 1\n"); printf("#define L1_CODE_SIZE 65535\n"); printf("#define L1_DATA_SIZE 65535\n"); printf("#define L1_DATA_LINESIZE 256\n"); @@ -608,7 +745,7 @@ void get_libname(void) void get_features(void) { -#ifdef __linux +#if defined( __linux ) || defined( __NetBSD__ ) FILE *infile; char buffer[2048], *p,*t; p = (char *) NULL ; diff --git a/ctest/c_cblat1.f b/ctest/c_cblat1.f index 73ab485bbd..2af54e7a65 100644 --- a/ctest/c_cblat1.f +++ b/ctest/c_cblat1.f @@ -41,7 +41,7 @@ PROGRAM CCBLAT1 IF 
(PASS) THEN WRITE (NOUT,99998) ELSE - CALL ABORT + ERROR STOP END IF 20 CONTINUE * @@ -231,7 +231,7 @@ SUBROUTINE CHECK1(SFAC) CALL ITEST1(ICAMAXTEST(N,CX,INCX),ITRUE3(NP1)) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK1' - CALL ABORT + ERROR STOP END IF * 40 CONTINUE @@ -515,7 +515,7 @@ SUBROUTINE CHECK2(SFAC) CALL CTEST(LENY,CY,CT10Y(1,KN,KI),CSIZE3,1.0E0) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK2' - CALL ABORT + ERROR STOP END IF * 40 CONTINUE diff --git a/ctest/c_cblat2.f b/ctest/c_cblat2.f index d48c10b7c8..d31884cddc 100644 --- a/ctest/c_cblat2.f +++ b/ctest/c_cblat2.f @@ -10,7 +10,7 @@ PROGRAM CBLAT2 * 'CBLAT2.SNAP' NAME OF SNAPSHOT OUTPUT FILE * -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) * F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO CALL ABORT ON FAILURES. +* F LOGICAL FLAG, T TO ERROR STOP ON FAILURES. * T LOGICAL FLAG, T TO TEST ERROR EXITS. * 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH * 16.0 THRESHOLD VALUE OF TEST RATIO @@ -243,7 +243,7 @@ PROGRAM CBLAT2 $ GO TO 70 60 CONTINUE WRITE( NOUT, FMT = 9986 )SNAMET - CALL ABORT + ERROR STOP 70 LTEST( I ) = LTESTT GO TO 50 * @@ -283,7 +283,7 @@ PROGRAM CBLAT2 SAME = LCE( YY, YT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANS = 'T' CALL CMVCH( TRANS, N, N, ONE, A, NMAX, X, -1, ZERO, Y, -1, YT, G, @@ -291,7 +291,7 @@ PROGRAM CBLAT2 SAME = LCE( YY, YT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR - CALL ABORT + ERROR STOP END IF * * Test each subroutine in turn. 
@@ -419,7 +419,7 @@ PROGRAM CBLAT2 $ CLOSE ( NTRA ) CLOSE ( NOUT ) IF( FATAL ) THEN - CALL ABORT + ERROR STOP END IF * 10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' ) diff --git a/ctest/c_cblat3.f b/ctest/c_cblat3.f index 5d289aafe0..f713b2dd0a 100644 --- a/ctest/c_cblat3.f +++ b/ctest/c_cblat3.f @@ -10,7 +10,7 @@ PROGRAM CBLAT3 * 'CBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE * -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) * F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO CALL ABORT ON FAILURES. +* F LOGICAL FLAG, T TO ERROR STOP ON FAILURES. * T LOGICAL FLAG, T TO TEST ERROR EXITS. * 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH * 16.0 THRESHOLD VALUE OF TEST RATIO @@ -194,7 +194,7 @@ PROGRAM CBLAT3 $ GO TO 50 40 CONTINUE WRITE( NOUT, FMT = 9990 )SNAMET - CALL ABORT + ERROR STOP 50 LTEST( I ) = LTESTT GO TO 30 * @@ -237,7 +237,7 @@ PROGRAM CBLAT3 SAME = LCE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'C' CALL CMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -246,7 +246,7 @@ PROGRAM CBLAT3 SAME = LCE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF DO 120 J = 1, N AB( J, NMAX + 1 ) = N - J + 1 @@ -264,7 +264,7 @@ PROGRAM CBLAT3 SAME = LCE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'C' CALL CMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -273,7 +273,7 @@ PROGRAM CBLAT3 SAME = LCE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF * * Test each subroutine in turn. 
@@ -386,7 +386,7 @@ PROGRAM CBLAT3 $ CLOSE ( NTRA ) CLOSE ( NOUT ) IF( FATAL ) THEN - CALL ABORT + ERROR STOP END IF * 10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' ) diff --git a/ctest/c_cblat3_3m.f b/ctest/c_cblat3_3m.f index 73fca5664f..3f8157b0ed 100644 --- a/ctest/c_cblat3_3m.f +++ b/ctest/c_cblat3_3m.f @@ -10,7 +10,7 @@ PROGRAM CBLAT3 * 'CBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE * -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) * F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO CALL ABORT ON FAILURES. +* F LOGICAL FLAG, T TO ERROR STOP ON FAILURES. * T LOGICAL FLAG, T TO TEST ERROR EXITS. * 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH * 16.0 THRESHOLD VALUE OF TEST RATIO @@ -194,7 +194,7 @@ PROGRAM CBLAT3 $ GO TO 50 40 CONTINUE WRITE( NOUT, FMT = 9990 )SNAMET - CALL ABORT + ERROR STOP 50 LTEST( I ) = LTESTT GO TO 30 * @@ -237,7 +237,7 @@ PROGRAM CBLAT3 SAME = LCE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'C' CALL CMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -246,7 +246,7 @@ PROGRAM CBLAT3 SAME = LCE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF DO 120 J = 1, N AB( J, NMAX + 1 ) = N - J + 1 @@ -264,7 +264,7 @@ PROGRAM CBLAT3 SAME = LCE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'C' CALL CMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -273,7 +273,7 @@ PROGRAM CBLAT3 SAME = LCE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF * * Test each subroutine in turn. 
@@ -386,7 +386,7 @@ PROGRAM CBLAT3 $ CLOSE ( NTRA ) CLOSE ( NOUT ) IF( FATAL ) THEN - CALL ABORT + ERROR STOP END IF * 10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' ) diff --git a/ctest/c_dblat1.f b/ctest/c_dblat1.f index 99c8b5da49..4877ea62b8 100644 --- a/ctest/c_dblat1.f +++ b/ctest/c_dblat1.f @@ -47,7 +47,7 @@ PROGRAM DCBLAT1 IF (PASS) THEN WRITE (NOUT,99998) ELSE - CALL ABORT + ERROR STOP END IF 20 CONTINUE * @@ -139,7 +139,7 @@ SUBROUTINE CHECK0(SFAC) CALL STEST1(SS,DS1(K),DS1(K),SFAC) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK0' - CALL ABORT + ERROR STOP END IF 20 CONTINUE 40 RETURN @@ -232,7 +232,7 @@ SUBROUTINE CHECK1(SFAC) CALL ITEST1(IDAMAXTEST(N,SX,INCX),ITRUE2(NP1)) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK1' - CALL ABORT + ERROR STOP END IF 60 CONTINUE 80 CONTINUE @@ -387,7 +387,7 @@ SUBROUTINE CHECK2(SFAC) CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0D0) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK2' - CALL ABORT + ERROR STOP END IF 100 CONTINUE 120 CONTINUE @@ -475,7 +475,7 @@ SUBROUTINE CHECK3(SFAC) 70 CONTINUE ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK3' - CALL ABORT + ERROR STOP END IF 40 CONTINUE 60 CONTINUE diff --git a/ctest/c_dblat2.f b/ctest/c_dblat2.f index 01a21a7163..342382c9ed 100644 --- a/ctest/c_dblat2.f +++ b/ctest/c_dblat2.f @@ -10,7 +10,7 @@ PROGRAM DBLAT2 * 'DBLAT2.SNAP' NAME OF SNAPSHOT OUTPUT FILE * -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) * F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO CALL ABORT ON FAILURES. +* F LOGICAL FLAG, T TO ERROR STOP ON FAILURES. * T LOGICAL FLAG, T TO TEST ERROR EXITS. 
* 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH * 16.0 THRESHOLD VALUE OF TEST RATIO @@ -239,7 +239,7 @@ PROGRAM DBLAT2 $ GO TO 70 60 CONTINUE WRITE( NOUT, FMT = 9986 )SNAMET - CALL ABORT + ERROR STOP 70 LTEST( I ) = LTESTT GO TO 50 * @@ -279,7 +279,7 @@ PROGRAM DBLAT2 SAME = LDE( YY, YT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANS = 'T' CALL DMVCH( TRANS, N, N, ONE, A, NMAX, X, -1, ZERO, Y, -1, YT, G, @@ -287,7 +287,7 @@ PROGRAM DBLAT2 SAME = LDE( YY, YT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR - CALL ABORT + ERROR STOP END IF * * Test each subroutine in turn. @@ -415,7 +415,7 @@ PROGRAM DBLAT2 $ CLOSE ( NTRA ) CLOSE ( NOUT ) IF( FATAL ) THEN - CALL ABORT + ERROR STOP END IF * 10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' ) diff --git a/ctest/c_dblat3.f b/ctest/c_dblat3.f index 00d16c2961..cbd95b8544 100644 --- a/ctest/c_dblat3.f +++ b/ctest/c_dblat3.f @@ -10,7 +10,7 @@ PROGRAM DBLAT3 * 'DBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE * -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) * F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO CALL ABORT ON FAILURES. +* F LOGICAL FLAG, T TO ERROR STOP ON FAILURES. * T LOGICAL FLAG, T TO TEST ERROR EXITS. 
* 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH * 16.0 THRESHOLD VALUE OF TEST RATIO @@ -189,7 +189,7 @@ PROGRAM DBLAT3 $ GO TO 50 40 CONTINUE WRITE( NOUT, FMT = 9990 )SNAMET - CALL ABORT + ERROR STOP 50 LTEST( I ) = LTESTT GO TO 30 * @@ -232,7 +232,7 @@ PROGRAM DBLAT3 SAME = LDE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'T' CALL DMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -241,7 +241,7 @@ PROGRAM DBLAT3 SAME = LDE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF DO 120 J = 1, N AB( J, NMAX + 1 ) = N - J + 1 @@ -259,7 +259,7 @@ PROGRAM DBLAT3 SAME = LDE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'T' CALL DMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -268,7 +268,7 @@ PROGRAM DBLAT3 SAME = LDE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF * * Test each subroutine in turn. 
@@ -380,7 +380,7 @@ PROGRAM DBLAT3 $ CLOSE ( NTRA ) CLOSE ( NOUT ) IF( FATAL ) THEN - CALL ABORT + ERROR STOP END IF * 10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' ) diff --git a/ctest/c_sblat1.f b/ctest/c_sblat1.f index b88c2b7835..2e7c1d9b3f 100644 --- a/ctest/c_sblat1.f +++ b/ctest/c_sblat1.f @@ -47,7 +47,7 @@ PROGRAM SCBLAT1 IF (PASS) THEN WRITE (NOUT,99998) ELSE - CALL ABORT + ERROR STOP END IF 20 CONTINUE * @@ -139,7 +139,7 @@ SUBROUTINE CHECK0(SFAC) CALL STEST1(SS,DS1(K),DS1(K),SFAC) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK0' - CALL ABORT + ERROR STOP END IF 20 CONTINUE 40 RETURN @@ -232,7 +232,7 @@ SUBROUTINE CHECK1(SFAC) CALL ITEST1(ISAMAXTEST(N,SX,INCX),ITRUE2(NP1)) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK1' - CALL ABORT + ERROR STOP END IF 60 CONTINUE 80 CONTINUE @@ -387,7 +387,7 @@ SUBROUTINE CHECK2(SFAC) CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0E0) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK2' - CALL ABORT + ERROR STOP END IF 100 CONTINUE 120 CONTINUE @@ -482,7 +482,7 @@ SUBROUTINE CHECK3(SFAC) 70 CONTINUE ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK3' - CALL ABORT + ERROR STOP END IF 40 CONTINUE 60 CONTINUE diff --git a/ctest/c_sblat2.f b/ctest/c_sblat2.f index 18d568d5d3..00cbc8f011 100644 --- a/ctest/c_sblat2.f +++ b/ctest/c_sblat2.f @@ -10,7 +10,7 @@ PROGRAM SBLAT2 * 'SBLAT2.SNAP' NAME OF SNAPSHOT OUTPUT FILE * -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) * F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO CALL ABORT ON FAILURES. +* F LOGICAL FLAG, T TO ERROR STOP ON FAILURES. * T LOGICAL FLAG, T TO TEST ERROR EXITS. 
* 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH * 16.0 THRESHOLD VALUE OF TEST RATIO @@ -239,7 +239,7 @@ PROGRAM SBLAT2 $ GO TO 70 60 CONTINUE WRITE( NOUT, FMT = 9986 )SNAMET - CALL ABORT + ERROR STOP 70 LTEST( I ) = LTESTT GO TO 50 * @@ -279,7 +279,7 @@ PROGRAM SBLAT2 SAME = LSE( YY, YT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANS = 'T' CALL SMVCH( TRANS, N, N, ONE, A, NMAX, X, -1, ZERO, Y, -1, YT, G, @@ -287,7 +287,7 @@ PROGRAM SBLAT2 SAME = LSE( YY, YT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR - CALL ABORT + ERROR STOP END IF * * Test each subroutine in turn. @@ -415,7 +415,7 @@ PROGRAM SBLAT2 $ CLOSE ( NTRA ) CLOSE ( NOUT ) IF( FATAL ) THEN - CALL ABORT + ERROR STOP END IF * 10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' ) diff --git a/ctest/c_sblat3.f b/ctest/c_sblat3.f index bbb58d04f6..61bf46997f 100644 --- a/ctest/c_sblat3.f +++ b/ctest/c_sblat3.f @@ -10,7 +10,7 @@ PROGRAM SBLAT3 * 'SBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE * -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) * F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO CALL ABORT ON FAILURES. +* F LOGICAL FLAG, T TO ERROR STOP ON FAILURES. * T LOGICAL FLAG, T TO TEST ERROR EXITS. 
* 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH * 16.0 THRESHOLD VALUE OF TEST RATIO @@ -188,7 +188,7 @@ PROGRAM SBLAT3 $ GO TO 50 40 CONTINUE WRITE( NOUT, FMT = 9990 )SNAMET - CALL ABORT + ERROR STOP 50 LTEST( I ) = LTESTT GO TO 30 * @@ -231,7 +231,7 @@ PROGRAM SBLAT3 SAME = LSE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'T' CALL SMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -240,7 +240,7 @@ PROGRAM SBLAT3 SAME = LSE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF DO 120 J = 1, N AB( J, NMAX + 1 ) = N - J + 1 @@ -258,7 +258,7 @@ PROGRAM SBLAT3 SAME = LSE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'T' CALL SMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -267,7 +267,7 @@ PROGRAM SBLAT3 SAME = LSE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF * * Test each subroutine in turn. 
@@ -379,7 +379,7 @@ PROGRAM SBLAT3 $ CLOSE ( NTRA ) CLOSE ( NOUT ) IF( FATAL ) THEN - CALL ABORT + ERROR STOP END IF * 10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' ) diff --git a/ctest/c_zblat1.f b/ctest/c_zblat1.f index 43486433e3..1d48159c91 100644 --- a/ctest/c_zblat1.f +++ b/ctest/c_zblat1.f @@ -41,7 +41,7 @@ PROGRAM ZCBLAT1 IF (PASS) THEN WRITE (NOUT,99998) ELSE - CALL ABORT + ERROR STOP END IF 20 CONTINUE * @@ -231,7 +231,7 @@ SUBROUTINE CHECK1(SFAC) CALL ITEST1(IZAMAXTEST(N,CX,INCX),ITRUE3(NP1)) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK1' - CALL ABORT + ERROR STOP END IF * 40 CONTINUE @@ -515,7 +515,7 @@ SUBROUTINE CHECK2(SFAC) CALL CTEST(LENY,CY,CT10Y(1,KN,KI),CSIZE3,1.0D0) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK2' - CALL ABORT + ERROR STOP END IF * 40 CONTINUE diff --git a/ctest/c_zblat2.f b/ctest/c_zblat2.f index daa1a603b2..220e2fd259 100644 --- a/ctest/c_zblat2.f +++ b/ctest/c_zblat2.f @@ -10,7 +10,7 @@ PROGRAM ZBLAT2 * 'CBLAT2.SNAP' NAME OF SNAPSHOT OUTPUT FILE * -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) * F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO CALL ABORT ON FAILURES. +* F LOGICAL FLAG, T TO ERROR STOP ON FAILURES. * T LOGICAL FLAG, T TO TEST ERROR EXITS. * 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH * 16.0 THRESHOLD VALUE OF TEST RATIO @@ -243,7 +243,7 @@ PROGRAM ZBLAT2 $ GO TO 70 60 CONTINUE WRITE( NOUT, FMT = 9986 )SNAMET - CALL ABORT + ERROR STOP 70 LTEST( I ) = LTESTT GO TO 50 * @@ -283,7 +283,7 @@ PROGRAM ZBLAT2 SAME = LZE( YY, YT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANS = 'T' CALL ZMVCH( TRANS, N, N, ONE, A, NMAX, X, -1, ZERO, Y, -1, YT, G, @@ -291,7 +291,7 @@ PROGRAM ZBLAT2 SAME = LZE( YY, YT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR - CALL ABORT + ERROR STOP END IF * * Test each subroutine in turn. 
@@ -419,7 +419,7 @@ PROGRAM ZBLAT2 $ CLOSE ( NTRA ) CLOSE ( NOUT ) IF( FATAL ) THEN - CALL ABORT + ERROR STOP END IF * 10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' ) diff --git a/ctest/c_zblat3.f b/ctest/c_zblat3.f index 83eb9e9184..e14f5af65a 100644 --- a/ctest/c_zblat3.f +++ b/ctest/c_zblat3.f @@ -10,7 +10,7 @@ PROGRAM ZBLAT3 * 'CBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE * -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) * F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO CALL ABORT ON FAILURES. +* F LOGICAL FLAG, T TO ERROR STOP ON FAILURES. * T LOGICAL FLAG, T TO TEST ERROR EXITS. * 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH * 16.0 THRESHOLD VALUE OF TEST RATIO @@ -195,7 +195,7 @@ PROGRAM ZBLAT3 $ GO TO 50 40 CONTINUE WRITE( NOUT, FMT = 9990 )SNAMET - CALL ABORT + ERROR STOP 50 LTEST( I ) = LTESTT GO TO 30 * @@ -238,7 +238,7 @@ PROGRAM ZBLAT3 SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'C' CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -247,7 +247,7 @@ PROGRAM ZBLAT3 SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF DO 120 J = 1, N AB( J, NMAX + 1 ) = N - J + 1 @@ -265,7 +265,7 @@ PROGRAM ZBLAT3 SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'C' CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -274,7 +274,7 @@ PROGRAM ZBLAT3 SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF * * Test each subroutine in turn. 
@@ -387,7 +387,7 @@ PROGRAM ZBLAT3 $ CLOSE ( NTRA ) CLOSE ( NOUT ) IF( FATAL ) THEN - CALL ABORT + ERROR STOP END IF * 10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' ) diff --git a/ctest/c_zblat3_3m.f b/ctest/c_zblat3_3m.f index d0923439e8..6f52b64036 100644 --- a/ctest/c_zblat3_3m.f +++ b/ctest/c_zblat3_3m.f @@ -10,7 +10,7 @@ PROGRAM ZBLAT3 * 'CBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE * -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) * F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. -* F LOGICAL FLAG, T TO CALL ABORT ON FAILURES. +* F LOGICAL FLAG, T TO ERROR STOP ON FAILURES. * T LOGICAL FLAG, T TO TEST ERROR EXITS. * 2 0 TO TEST COLUMN-MAJOR, 1 TO TEST ROW-MAJOR, 2 TO TEST BOTH * 16.0 THRESHOLD VALUE OF TEST RATIO @@ -195,7 +195,7 @@ PROGRAM ZBLAT3 $ GO TO 50 40 CONTINUE WRITE( NOUT, FMT = 9990 )SNAMET - CALL ABORT + ERROR STOP 50 LTEST( I ) = LTESTT GO TO 30 * @@ -238,7 +238,7 @@ PROGRAM ZBLAT3 SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'C' CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -247,7 +247,7 @@ PROGRAM ZBLAT3 SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF DO 120 J = 1, N AB( J, NMAX + 1 ) = N - J + 1 @@ -265,7 +265,7 @@ PROGRAM ZBLAT3 SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF TRANSB = 'C' CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, @@ -274,7 +274,7 @@ PROGRAM ZBLAT3 SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR - CALL ABORT + ERROR STOP END IF * * Test each subroutine in turn. 
@@ -387,7 +387,7 @@ PROGRAM ZBLAT3 $ CLOSE ( NTRA ) CLOSE ( NOUT ) IF( FATAL ) THEN - CALL ABORT + ERROR STOP END IF * 10002 FORMAT( ' COLUMN-MAJOR AND ROW-MAJOR DATA LAYOUTS ARE TESTED' ) diff --git a/ctest/cblas_test.h b/ctest/cblas_test.h index 3eeb46ac2c..502a2fee20 100644 --- a/ctest/cblas_test.h +++ b/ctest/cblas_test.h @@ -10,6 +10,15 @@ #define int long #endif +#if defined(_MSC_VER) && defined(__INTEL_CLANG_COMPILER) +//#define LAPACK_COMPLEX_STRUCTURE +#define NOCHANGE +#endif +/* e.g. mingw64/x86_64-w64-mingw32/include/winerror.h */ +#ifdef FAILED +#undef FAILED +#endif + #define TRUE 1 #define PASSED 1 #define TEST_ROW_MJR 1 diff --git a/docs/build_system.md b/docs/build_system.md index 3de2205807..d5d76cc463 100644 --- a/docs/build_system.md +++ b/docs/build_system.md @@ -1,104 +1,122 @@ -This page describes the Make-based build, which is the default/authoritative -build method. Note that the OpenBLAS repository also supports building with -CMake (not described here) - that generally works and is tested, however there -may be small differences between the Make and CMake builds. +!!! info "Supported build systems" + + This page describes the Make-based build, which is the + default/authoritative build method. Note that the OpenBLAS repository also + supports building with CMake (not described here) - that generally works + and is tested, however there may be small differences between the Make and + CMake builds. + + +## Makefile dependency graph + + + +```mermaid +flowchart LR + A[Makefile] -->|included by many of the Makefiles in the subdirectories!| B(Makefile.system) + B -->|triggered, not included, once by Makefile.system, and runs before any of the actual library code is built. 
builds and runs the 'getarch' tool for cpu identification, runs the compiler detection scripts c_check/f_check| C{Makefile.prebuild} + C -->|either this or Makefile_kernel.conf is generated| D[Makefile.conf] + C -->|temporary Makefile.conf during DYNAMIC_ARCH builds| E[Makefile_kernel.conf] + B -->|defaults for build options that can be given on the make command line| F[Makefile.rule] + B -->|architecture-specific compiler options and OpenBLAS buffer size values| G[Makefile.$ARCH] + A --> exports + A -->|directories: test, ctest, utest, cpp_thread_test| H(test directories) + A --> I($BLASDIRS) + I --> interface + I --> driver/level2 + I --> driver/level3 + I --> driver/others + A -->|for each target in DYNAMIC_CORE if DYNAMIC_ARCH=1| kernel + A -->|subdirs: timing, testing, testing/EIG, testing/LIN| J($NETLIB_LAPACK_DIR) + A --> relapack +``` -!!! warning - This page is made by someone who is not the developer and should not be considered as an official documentation of the build system. For getting the full picture, it is best to read the Makefiles and understand them yourself. -## Makefile dep graph +## Important Variables -``` -Makefile -| -|----- Makefile.system # !!! this is included by many of the Makefiles in the subdirectories !!! -| | -| |===== Makefile.prebuild # This is triggered (not included) once by Makefile.system -| | | # and runs before any of the actual library code is built. 
-| | | # (builds and runs the "getarch" tool for cpu identification, -| | | # runs the compiler detection scripts c_check and f_check) -| | | -| | ----- (Makefile.conf) [ either this or Makefile_kernel.conf is generated ] -| | | { Makefile.system#L243 } -| | ----- (Makefile_kernel.conf) [ temporary Makefile.conf during DYNAMIC_ARCH builds ] -| | -| |----- Makefile.rule # defaults for build options that can be given on the make command line -| | -| |----- Makefile.$(ARCH) # architecture-specific compiler options and OpenBLAS buffer size values -| -|~~~~~ exports/ -| -|~~~~~ test/ -| -|~~~~~ utest/ -| -|~~~~~ ctest/ -| -|~~~~~ cpp_thread_test/ -| -|~~~~~ kernel/ -| -|~~~~~ ${SUBDIRS} -| -|~~~~~ ${BLASDIRS} -| -|~~~~~ ${NETLIB_LAPACK_DIR}{,/timing,/testing/{EIG,LIN}} -| -|~~~~~ relapack/ -``` +Most of the tunable variables are found in +[Makefile.rule](https://github.com/xianyi/OpenBLAS/blob/develop/Makefile.rule), +along with their detailed descriptions. -## Important Variables +Most of the variables are detected automatically in +[Makefile.prebuild](https://github.com/xianyi/OpenBLAS/blob/develop/Makefile.prebuild), +if they are not set in the environment. -Most of the tunable variables are found in [Makefile.rule](https://github.com/xianyi/OpenBLAS/blob/develop/Makefile.rule), along with their detailed descriptions.
-Most of the variables are detected automatically in [Makefile.prebuild](https://github.com/xianyi/OpenBLAS/blob/develop/Makefile.prebuild), if they are not set in the environment. +The most commonly used variables are documented below. There are more options +though - please read the linked Makefiles if you want to see all variables. ### CPU related -``` -ARCH - Target architecture (eg. x86_64) -TARGET - Target CPU architecture, in case of DYNAMIC_ARCH=1 means library will not be usable on less capable CPUs -TARGET_CORE - TARGET_CORE will override TARGET internally during each cpu-specific cycle of the build for DYNAMIC_ARCH -DYNAMIC_ARCH - For building library for multiple TARGETs (does not lose any optimizations, but increases library size) -DYNAMIC_LIST - optional user-provided subset of the DYNAMIC_CORE list in Makefile.system -``` -### Toolchain related -``` -CC - TARGET C compiler used for compilation (can be cross-toolchains) -FC - TARGET Fortran compiler used for compilation (can be cross-toolchains, set NOFORTRAN=1 if used cross-toolchain has no fortran compiler) -AR, AS, LD, RANLIB - TARGET toolchain helpers used for compilation (can be cross-toolchains) +- `ARCH`: target architecture (e.g., `x86-64`). +- `DYNAMIC_ARCH`: For building library for multiple `TARGET`s (does not lose any + optimizations, but increases library size). +- `DYNAMIC_LIST`: optional user-provided subset of the `DYNAMIC_CORE` list in + [Makefile.system](https://github.com/xianyi/OpenBLAS/blob/develop/Makefile.system). +- `TARGET`: target CPU architecture. In case of `DYNAMIC_ARCH=1`, it means that + the library will not be usable on less capable CPUs. +- `TARGET_CORE`: override `TARGET` internally during each CPU-specific cycle of + the build for `DYNAMIC_ARCH`. 
-HOSTCC - compiler of build machine, needed to create proper config files for target architecture -HOST_CFLAGS - flags for build machine compiler -``` -### Library related -``` -BINARY - 32/64 bit library +### Toolchain related -BUILD_SHARED - Create shared library -BUILD_STATIC - Create static library +- `CC`: `TARGET` C compiler used for compilation (can be cross-toolchains). +- `FC`: `TARGET` Fortran compiler used for compilation (can be cross-toolchains, + set `NOFORTRAN=1` if the used cross-toolchain has no Fortran compiler). +- `COMMON_OPT`: flags to add to all invocations of the target C and Fortran compilers + (overrides `CFLAGS`/`FFLAGS` - prefer using `COMMON_OPT`) +- `CCOMMON_OPT`: flags to add to all invocations of the target C compiler + (overrides `CFLAGS`) +- `FCOMMON_OPT`: flags to add to all invocations of the target Fortran compiler + (overrides `FFLAGS`) +- `LDFLAGS`: flags to add to all target linker invocations +- `AR`, `AS`, `LD`, `RANLIB`: `TARGET` toolchain helpers used for compilation + (can be cross-toolchains). +- `HOSTCC`: compiler of build machine, needed to create proper config files for + the target architecture. +- `HOST_CFLAGS`: flags for the build machine compiler. 
-QUAD_PRECISION - enable support for IEEE quad precision [ largely unimplemented leftover from GotoBLAS, do not use ] -EXPRECISION - Obsolete option to use float80 of SSE on BSD-like systems -INTERFACE64 - Build with 64bit integer representations to support large array index values [ incompatible with standard API ] -BUILD_SINGLE - build the single-precision real functions of BLAS [and optionally LAPACK] -BUILD_DOUBLE - build the double-precision real functions -BUILD_COMPLEX - build the single-precision complex functions -BUILD_COMPLEX16 - build the double-precision complex functions -(all four types are included in the build by default when none was specifically selected) +### Library related -BUILD_BFLOAT16 - build the "half precision brainfloat" real functions +#### Library kind and bitness options + +- `BINARY`: whether to build a 32-bit or 64-bit library (default is `64`, set + to `32` on a 32-bit platform). +- `INTERFACE64`: build with 64-bit (ILP64) integer representations to support + large array index values (incompatible with the standard 32-bit integer (LP64) API). +- `NO_STATIC`: if set to `1`, don't build a static library (default is `0`) +- `NO_SHARED`: if set to `1`, don't build a shared library (default is `0`) + +#### Data type options + +- `BUILD_SINGLE`: build the single-precision real functions of BLAS and (if + it's built) LAPACK +- `BUILD_DOUBLE`: build the double-precision real functions +- `BUILD_COMPLEX`: build the single-precision complex functions +- `BUILD_COMPLEX16`: build the double-precision complex functions +- `BUILD_BFLOAT16`: build the "half precision brainfloat" real functions +- `EXPRECISION`: (do not use, this is a work in progress) option to use `long + double` functions + +By default, the single- and double-precision real and complex floating-point +functions are included in the build, while the half- and extended-precision +functions are not. 
-USE_THREAD - Use a multithreading backend (default to pthread) -USE_LOCKING - implement locking for thread safety even when USE_THREAD is not set (so that the singlethreaded library can - safely be called from multithreaded programs) -USE_OPENMP - Use OpenMP as multithreading backend -NUM_THREADS - define this to the maximum number of parallel threads you expect to need (defaults to the number of cores in the build cpu) -NUM_PARALLEL - define this to the number of OpenMP instances that your code may use for parallel calls into OpenBLAS (default 1,see below) - -``` - +#### Threading options + +- `USE_THREAD`: Use a multithreading backend (defaults to `pthreads`). +- `USE_LOCKING`: implement locking for thread safety even when `USE_THREAD` is + not set (so that the single-threaded library can safely be called from + multithreaded programs). +- `USE_OPENMP`: Use OpenMP as multithreading backend +- `NUM_THREADS`: define this to the maximum number of parallel threads you + expect to need (defaults to the number of cores in the build CPU). +- `NUM_PARALLEL`: define this to the number of OpenMP instances that your code + may use for parallel calls into OpenBLAS (the default is `1`, see below). OpenBLAS uses a fixed set of memory buffers internally, used for communicating and compiling partial results from individual threads. For efficiency, the @@ -118,3 +136,32 @@ same time, then only one of them will be able to make progress while all the rest of them spin-wait for the one available buffer. Setting `NUM_PARALLEL` to the upper bound on the number of OpenMP runtimes that you can have in a process ensures that there are a sufficient number of buffer sets available. 
+ +#### Library and symbol name options + +- `FIXED_LIBNAME`: if set to `1`, uses a non-versioned name for the library and + no symbolic linking to variant names (default is `0`) +- `LIBNAMEPREFIX`: prefix that, if given, will be inserted in the library name + before `openblas` (e.g., `xxx` will result in `libxxxopenblas.so`) +- `LIBNAMESUFFIX`: suffix that, if given, will be inserted in the library name + after `openblas`, separated by an underscore (e.g., `yyy` will result in + `libopenblas_yyy.so`) +- `SYMBOLPREFIX`: prefix that, if given, will be added to all symbol names + *and* to the library name +- `SYMBOLSUFFIX`: suffix that, if given, will be added to all symbol names + *and* to the library name + +#### BLAS and LAPACK options + +By default, the Fortran and C interfaces to BLAS and LAPACK are built, +including deprecated functions, while +[ReLAPACK](https://github.com/HPAC/ReLAPACK) is not. + +- `NO_CBLAS`: if set to `1`, don't build the CBLAS interface (default is `0`) +- `ONLY_CBLAS`: if set to `1`, only build the CBLAS interface (default is `0`) +- `NO_LAPACK`: if set to `1`, don't build LAPACK (default is `0`) +- `NO_LAPACKE`: if set to `1`, don't build the LAPACKE interface (default is `0`) +- `BUILD_LAPACK_DEPRECATED`: if set to `0`, don't build deprecated LAPACK + functions (default is `1`) +- `BUILD_RELAPACK`: if set to `1`, build Recursive LAPACK on top of LAPACK + (default is `0`) diff --git a/docs/extensions.md b/docs/extensions.md index 483b009289..bc015910d3 100644 --- a/docs/extensions.md +++ b/docs/extensions.md @@ -5,14 +5,14 @@ This page documents those non-standard APIs. 
## BLAS-like extensions -| Routine | Data Types | Description | -| ------------- |:------------- | :---------------| -| ?axpby | s,d,c,z | like axpy with a multiplier for y | -| ?gemm3m | c,z | gemm3m | -| ?imatcopy | s,d,c,z | in-place transpositon/copying | -| ?omatcopy | s,d,c,z | out-of-place transpositon/copying | -| ?geadd | s,d,c,z | matrix add | -| ?gemmt | s,d,c,z | gemm but only a triangular part updated| +| Routine | Data Types | Description | +| ------------- |:------------- | :-----------------------------------------------| +| ?axpby | s,d,c,z | like `axpy` with a multiplier for `y` | +| ?gemm3m | c,z | `gemm3m` | +| ?imatcopy | s,d,c,z | in-place transposition/copying | +| ?omatcopy | s,d,c,z | out-of-place transposition/copying | +| ?geadd | s,d,c,z | ATLAS-like matrix add `B = α*A+β*B` | +| ?gemmt | s,d,c,z | `gemm` but only a triangular part updated | ## bfloat16 functionality diff --git a/docs/faq.md b/docs/faq.md index 699042d512..93d76c67fb 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -51,9 +51,9 @@ In practice, the values are derived by experimentation to yield the block sizes ### How can I report a bug? -Please file an issue at this [issue page](https://github.com/xianyi/OpenBLAS/issues) or send mail to the [OpenBLAS mailing list](https://groups.google.com/forum/#!forum/openblas-users). +Please file an issue at this [issue page](https://github.com/OpenMathLib/OpenBLAS/issues) or send mail to the [OpenBLAS mailing list](https://groups.google.com/forum/#!forum/openblas-users). -Please provide the following information: CPU, OS, compiler, and OpenBLAS compiling flags (Makefile.rule). In addition, please describe how to reproduce this bug. +Please provide the following information: CPU, OS, compiler, OpenBLAS version and any compiling flags you used (Makefile.rule). In addition, please describe how to reproduce this bug. ### How to reference OpenBLAS. 
@@ -99,13 +99,13 @@ Here is the result of the DGEMM subroutine's performance on Intel Core i5-2500K ### How can I call an OpenBLAS function in Microsoft Visual Studio? -Please read [this page](install.md#visual-studio). +Please read [this page](install.md#visual-studio-native-windows-abi). ### How can I use CBLAS and LAPACKE without C99 complex number support (e.g. in Visual Studio)? Zaheer has fixed this bug. You can now use the structure instead of C99 complex numbers. Please read [this issue page](http://github.com/xianyi/OpenBLAS/issues/95) for details. -[This issue](https://github.com/xianyi/OpenBLAS/issues/305) is for using LAPACKE in Visual Studio. +[This issue](https://github.com/OpenMathLib/OpenBLAS/issues/305) is for using LAPACKE in Visual Studio. ### I get a SEGFAULT with multi-threading on Linux. What's wrong? @@ -134,6 +134,13 @@ Background: OpenBLAS implements optimized versions of some LAPACK functions, so Some of the LAPACK tests, notably in xeigtstz, try to allocate around 10MB on the stack. You may need to use `ulimit -s` to change the default limits on your system to allow this. +### My build worked fine and passed the BLAS tests, but running `make lapack-test` ends with a number of errors in the summary report + +The LAPACK tests were primarily created to test the validity of the Reference-LAPACK implementation, which is implemented in unoptimized, single-threaded Fortran code. This makes it very sensitive to small numerical deviations that can result from the use of specialized cpu instructions that combine multiplications and additions without intermediate rounding and storing to memory (FMA), or from changing the order of mathematical operations by splitting an original problem workload into smaller tasks that are solved in parallel. 
As a result, you may encounter a small number of errors in the "numerical" column of +the summary table at the end of the `make lapack-test` run - this is usually nothing to worry about, and the exact number and distribution of errors among the +four data types will often vary with the optimization flags you supplied to the compiler, or the cpu model for which you built OpenBLAS. Sporadic errors in the column labeled `other` are normally the sign of failed convergence of iterative diagonalizations for the same reasons just mentioned. A more detailed error report is stored in the file testing_results.txt - this should be consulted in case of doubt. Care should be taken if you encounter numerical errors in the hundreds, or `other` errors accompanied by the LAPACK error message "on entry to function_name parameter X had an illegal value" that signals a problem with argument passing between individual functions. +(See also [this issue](https://github.com/OpenMathLib/OpenBLAS/issues/4032) in the issue tracker on github for additional discussion, examples and links) + ### How could I disable OpenBLAS threading affinity on runtime? You can define the OPENBLAS_MAIN_FREE or GOTOBLAS_MAIN_FREE environment variable to disable threading affinity on runtime. For example, before the running, diff --git a/docs/install.md b/docs/install.md index ffb4659d82..656c6a1219 100644 --- a/docs/install.md +++ b/docs/install.md @@ -437,49 +437,72 @@ To then use the built OpenBLAS shared library in Visual Studio: [Qt Creator](http://qt.nokia.com/products/developer-tools/). -#### Windows on Arm - -The following tools needs to be installed to build for Windows on Arm (WoA): - -- Clang for Windows on Arm. - Find the latest LLVM build for WoA from [LLVM release page](https://releases.llvm.org/). 
- E.g: LLVM 12 build for WoA64 can be found [here](https://github.com/llvm/llvm-project/releases/download/llvmorg-12.0.0/LLVM-12.0.0-woa64.exe) - Run the LLVM installer and ensure that LLVM is added to environment PATH. -- Download and install classic Flang for Windows on Arm. - Classic Flang is the only available Fortran compiler for Windows on Arm for now. - A pre-release build can be found [here](https://github.com/kaadam/flang/releases/tag/v0.1) - There is no installer for classic flang and the zip package can be - extracted and the path needs to be added to environment `PATH`. - E.g., in PowerShell: - ``` - $env:Path += ";C:\flang_woa\bin" - ``` - -The following steps describe how to build the static library for OpenBLAS with and without LAPACK: - -1. Build OpenBLAS static library with BLAS and LAPACK routines with Make: - - ```bash - $ make CC="clang-cl" HOSTCC="clang-cl" AR="llvm-ar" BUILD_WITHOUT_LAPACK=0 NOFORTRAN=0 DYNAMIC_ARCH=0 TARGET=ARMV8 ARCH=arm64 BINARY=64 USE_OPENMP=0 PARALLEL=1 RANLIB="llvm-ranlib" MAKE=make F_COMPILER=FLANG FC=FLANG FFLAGS_NOOPT="-march=armv8-a -cpp" FFLAGS="-march=armv8-a -cpp" NEED_PIC=0 HOSTARCH=arm64 libs netlib - ``` - -2. Build static library with BLAS routines using CMake: - - Classic Flang has compatibility issues with CMake, hence only BLAS routines can be compiled with CMake: - - ```bash - $ mkdir build - $ cd build - $ cmake .. -G Ninja -DCMAKE_C_COMPILER=clang -DBUILD_WITHOUT_LAPACK=1 -DNOFORTRAN=1 -DDYNAMIC_ARCH=0 -DTARGET=ARMV8 -DARCH=arm64 -DBINARY=64 -DUSE_OPENMP=0 -DCMAKE_SYSTEM_PROCESSOR=ARM64 -DCMAKE_CROSSCOMPILING=1 -DCMAKE_SYSTEM_NAME=Windows - $ cmake --build . --config Release - ``` - -!!! tip "`getarch.exe` execution error" - - If you notice that platform-specific headers by `getarch.exe` are not - generated correctly, this could be due to a known debug runtime DLL issue for - arm64 platforms. 
Please check out [this page](https://linaro.atlassian.net/wiki/spaces/WOAR/pages/28677636097/Debug+run-time+DLL+issue#Workaround) - for a workaround. +### Windows on Arm + +A fully functional native OpenBLAS for WoA that can be built as both a static and dynamic library using LLVM toolchain and Visual Studio 2022. Before starting to build, make sure that you have installed Visual Studio 2022 on your ARM device, including the "Desktop Development with C++" component (that contains the cmake tool). +(Note that you can use the free "Visual Studio 2022 Community Edition" for this task. In principle it would be possible to build with VisualStudio alone, but using +the LLVM toolchain enables native compilation of the Fortran sources of LAPACK and of all the optimized assembly files, which VisualStudio cannot handle on its own) + +1. Clone OpenBLAS to your local machine and checkout to latest release of + OpenBLAS (unless you want to build the latest development snapshot - here we + are using the 0.3.28 release as the example, of course this exact version + may be outdated by the time you read this) + + ```cmd + git clone https://github.com/OpenMathLib/OpenBLAS.git + cd OpenBLAS + git checkout v0.3.28 + ``` + +2. Install Latest LLVM toolchain for WoA: + + Download the Latest LLVM toolchain for WoA from [the Release + page](https://github.com/llvm/llvm-project/releases/tag/llvmorg-19.1.5). At + the time of writing, this is version 19.1.5 - be sure to select the + latest release for which you can find a precompiled package whose name ends + in "-woa64.exe" (precompiled packages usually lag a week or two behind their + corresponding source release). Make sure to enable the option + *“Add LLVM to the system PATH for all the users”*. + + Note: Make sure that the path of LLVM toolchain is at the top of Environment + Variables section to avoid conflicts between the set of compilers available + in the system path + +3. 
Launch the Native Command Prompt for Windows ARM64: + + From the start menu search for *"ARM64 Native Tools Command Prompt for Visual + Studio 2022"*. Alternatively open command prompt, run the following command to + activate the environment: + + ```cmd + C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsarm64.bat + ``` + +4. Navigate to the OpenBLAS source code directory and start building OpenBLAS + by invoking Ninja: + + ```cmd + cd OpenBLAS + mkdir build + cd build + + cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DTARGET=ARMV8 -DBINARY=64 -DCMAKE_C_COMPILER=clang-cl -DCMAKE_Fortran_COMPILER=flang-new + + ninja -j16 + ``` + + Note: You might want to include additional options in the cmake command + here. For example, the default configuration only generates a + `static.lib` version of the library. If you prefer a DLL, you can add + `-DBUILD_SHARED_LIBS=ON`. + + Note that it is also possible to use the same setup to build OpenBLAS + with Make, if you prefer Makefiles over the CMake build for some + reason: + + ```cmd + $ make CC=clang-cl FC=flang-new AR="llvm-ar" TARGET=ARMV8 ARCH=arm64 RANLIB="llvm-ranlib" MAKE=make + ``` #### Generating an import library @@ -501,7 +524,7 @@ In your shell, move to this directory: `cd exports`. incompatibility in the C ABI would be a bug). The import libraries of MSVC have the suffix `.lib`. They are generated - from a `.def` file using MSVC's `lib.exe`. See [the MSVC instructions](use_visual_studio.md#generate-import-library-before-0210-version). + from a `.def` file using MSVC's `lib.exe`. === "MinGW" @@ -532,7 +555,6 @@ In your shell, move to this directory: `cd exports`. 
To build OpenBLAS for Android, you will need the following tools installed on your machine: - [The Android NDK](https://developer.android.com/ndk/) -- Perl - Clang compiler on the build machine The next two sections below describe how to build with Clang for ARMV7 and @@ -574,7 +596,9 @@ utility in the make command above, like so: AR=${NDK_BUNDLE_DIR}/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-gcc-ar ``` otherwise you may get a linker error complaining like `malformed archive header -name at 8` when the native macOS `ar` command was invoked instead. +name at 8` when the native macOS `ar` command was invoked instead. Note that +with recent NDK versions, the AR tool may be named `llvm-ar` rather than what +is assumed above. #### Building for ARMV8 @@ -604,12 +628,17 @@ Note: for NDK 23b, something as simple as: export PATH=/opt/android-ndk-r23b/toolchains/llvm/prebuilt/linux-x86_64/bin/:$PATH make HOSTCC=gcc CC=/opt/android-ndk-r23b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android31-clang ONLY_CBLAS=1 TARGET=ARMV8 ``` -appears to be sufficient on Linux. +appears to be sufficient on Linux. On OSX, setting AR to the ar provided in the +"bin" path of the NDK (probably `llvm-ar`) is also necessary. ??? note "Alternative build script for 3 architectures" - This script will build OpenBLAS for 3 architecture (`ARMV7`, `ARMV8`, `X86`) and install them to `/opt/OpenBLAS/lib`. + This script will build OpenBLAS for 3 architecture (`ARMV7`, `ARMV8`, + `X86`) and install them to `/opt/OpenBLAS/lib`. Of course you can also copy + only the section that is of interest to you - also notice that the `AR=` + line may need adapting to the name of the ar tool provided in your + `$TOOLCHAIN/bin` - for example `llvm-ar` in some recent NDK versions. It was tested on macOS with NDK version 21.3.6528147. 
```bash @@ -680,6 +709,40 @@ make TARGET=ARMV8 DYNAMIC_ARCH=1 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1 Adjust `MIN_IOS_VERSION` as necessary for your installation. E.g., change the version number to the minimum iOS version you want to target and execute this file to build the library. +### HarmonyOS + +For this target you will need the cross-compiler toolchain package by Huawei, +which contains solutions for both Windows and Linux. Only the Linux-based +toolchain has been tested so far, but the following instructions may apply +similarly to Windows: + +Download [this HarmonyOS 4.1.1 SDK](https://repo.huaweicloud.com/harmonyos/os/4.1.1-Release/ohos-sdk-windows_linux-public.tar.gz), +or whatever newer version may be available in the future. Use `tar -xvf +ohos-sdk-windows_linux-public.tar.gz` to unpack it somewhere on your system. +This will create a folder named "ohos-sdk" with subfolders "linux" and +"windows". In the linux one you will find a ZIP archive named +`native-linux-x64-4.1.7.8-Release.zip` - you need to unzip this where you want +to install the cross-compiler, for example in `/opt/ohos-sdk`. + +In the directory where you unpacked OpenBLAS, create a build directory for cmake, and change into it: +```bash +mkdir build +cd build +``` +Use the version of `cmake` that came with the SDK, and specify the location of +its toolchain file as a cmake option. Also set the build target for OpenBLAS to +`ARMV8` and specify `NOFORTRAN=1` (at least as of version 4.1.1, the SDK +contains no Fortran compiler): +```bash +/opt/ohos-sdk/linux/native/build-tools/cmake/bin/cmake \ + -DCMAKE_TOOLCHAIN_FILE=/opt/ohos-sdk/linux/native/build/cmake/ohos.toolchain.cmake \ + -DOHOS_ARCH="arm64-v8a" -DTARGET=ARMV8 -DNOFORTRAN=1 .. +``` +Additional other OpenBLAS build options like `USE_OPENMP=1` or `DYNAMIC_ARCH=1` +will probably work too. Finally do the build: +```bash +/opt/ohos-sdk/linux/native/build-tools/cmake/bin/cmake --build . 
+``` ### MIPS diff --git a/docs/runtime_variables.md b/docs/runtime_variables.md new file mode 100644 index 0000000000..f1ffb791fd --- /dev/null +++ b/docs/runtime_variables.md @@ -0,0 +1,38 @@ +OpenBLAS checks the following environment variables on startup: + +* `OPENBLAS_NUM_THREADS`: the number of threads to use (for non-OpenMP builds + of OpenBLAS) +* `OMP_NUM_THREADS`: the number of threads to use (for OpenMP builds - note + that setting this may also affect any other OpenMP code) +* `OPENBLAS_DEFAULT_NUM_THREADS`: the number of threads to use, irrespective if + OpenBLAS was built for OpenMP or pthreads + +* `OPENBLAS_MAIN_FREE=1`: this can be used to disable automatic assignment of + cpu affinity in OpenBLAS builds that have it enabled by default +* `OPENBLAS_THREAD_TIMEOUT`: this can be used to define the length of time + that idle threads should wait before exiting +* `OMP_ADAPTIVE=1`: this can be used in OpenMP builds to actually remove any + surplus threads when the number of threads is decreased + + +`DYNAMIC_ARCH` builds also accept the following: + +* `OPENBLAS_VERBOSE`: + + - set this to `1` to enable a warning when there is no exact match for the + detected cpu in the library + - set this to `2` to make OpenBLAS print the name of the cpu target it + autodetected + +* `OPENBLAS_CORETYPE`: set this to one of the supported target names to + override autodetection, e.g., `OPENBLAS_CORETYPE=HASWELL` +* `OPENBLAS_L2_SIZE`: set this to override the autodetected size of the L2 + cache where it is not reported correctly (in virtual environments) + + +Deprecated variables still recognized for compatibility: + +* `GOTO_NUM_THREADS`: equivalent to `OPENBLAS_NUM_THREADS` +* `GOTOBLAS_MAIN_FREE`: equivalent to `OPENBLAS_MAIN_FREE` +* `OPENBLAS_BLOCK_FACTOR`: this applies a scale factor to the GEMM "P" + parameter of the block matrix code, see file `driver/others/parameter.c` diff --git a/driver/others/dynamic_arm64.c b/driver/others/dynamic_arm64.c index 
3d2bed4af8..5d48f6806e 100644 --- a/driver/others/dynamic_arm64.c +++ b/driver/others/dynamic_arm64.c @@ -286,22 +286,59 @@ static gotoblas_t *get_coretype(void) { if (!(getauxval(AT_HWCAP) & HWCAP_CPUID)) { #ifdef __linux + int i; + int ncores=0; + int prt,cpucap,cpulowperf=0,cpumidperf=0,cpuhiperf=0; FILE *infile; - char buffer[512], *p, *cpu_part = NULL, *cpu_implementer = NULL; - p = (char *) NULL ; - infile = fopen("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1","r"); - if (!infile) return NULL; - (void)fgets(buffer, sizeof(buffer), infile); - midr_el1=strtoul(buffer,NULL,16); - fclose(infile); -#else + char buffer[512], *cpu_part = NULL, *cpu_implementer = NULL; + + infile = fopen("/sys/devices/system/cpu/possible","r"); + if (infile) { + (void)fgets(buffer, sizeof(buffer), infile); + sscanf(buffer,"0-%d",&ncores); + fclose (infile); + ncores++; + } else { + infile = fopen("/proc/cpuinfo","r"); + while (fgets(buffer, sizeof(buffer), infile)) { + if (!strncmp("processor", buffer, 9)) + ncores++; + } + } + for (i=0;i> 24) & 0xFF; + prt = (midr_el1 >> 4) & 0xFFF; + fclose(infile); + sprintf(buffer,"/sys/devices/system/cpu/cpu%d/cpu_capability",i); + infile = fopen(buffer,"r"); + if (infile) { + (void)fgets(buffer, sizeof(buffer), infile); + cpucap=strtoul(buffer,NULL,16); + fclose(infile); + if (cpucap >= 1000) cpuhiperf++; + else if (cpucap >=500) cpumidperf++; + else cpulowperf++; + if (cpucap >=1000) part = prt; + } else if (implementer == 0x41 ){ + if (prt >= 0xd4b) cpuhiperf++; + else if (prt>= 0xd07) cpumidperf++; + else cpulowperf++; + } else cpulowperf++; + } + if (!part) part = prt; +#else snprintf(coremsg, 128, "Kernel lacks cpuid feature support. 
Auto detection of core type failed !!!\n"); openblas_warning(1, coremsg); return NULL; #endif } else { get_cpu_ftr(MIDR_EL1, midr_el1); - } + /* * MIDR_EL1 * @@ -312,7 +349,7 @@ static gotoblas_t *get_coretype(void) { */ implementer = (midr_el1 >> 24) & 0xFF; part = (midr_el1 >> 4) & 0xFFF; - + } switch(implementer) { case 0x41: // ARM diff --git a/driver/others/memory.c b/driver/others/memory.c index 6343a3785e..c53e798bc1 100644 --- a/driver/others/memory.c +++ b/driver/others/memory.c @@ -2538,7 +2538,7 @@ static void *alloc_shm(void *address){ } #endif -#if defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS +#if ((defined ALLOC_HUGETLB) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)) static void alloc_hugetlb_free(struct release_t *release){ @@ -3254,7 +3254,7 @@ void blas_shutdown(void){ #endif newmemory[pos].lock = 0; } - free(newmemory); + free((void*)newmemory); newmemory = NULL; memory_overflowed = 0; } diff --git a/exports/gensymbol b/exports/gensymbol index f3ca9a427e..f747dd091f 100755 --- a/exports/gensymbol +++ b/exports/gensymbol @@ -869,8 +869,12 @@ lapackobjs2z="$lapackobjs2z #functions added post 3.11 lapackobjs2c="$lapackobjs2c + cgelst + cgeqp3rk claqp2rk claqp3rk + clatrs3 + crscl ctrsyl3 " # claqz0 @@ -894,6 +898,16 @@ lapackobjs2d="$lapackobjs2d # dlaqz3 # dlaqz4 +lapackobjs2s="$lapackobjs2s + sgelst + sgeqp3rk + slaqp2rk + slaqp3rk + slarmm + slatrs3 + strsyl3 + " + lapackobjs2z="$lapackobjs2z zgelst zgeqp3rk diff --git a/interface/gemv.c b/interface/gemv.c index 2c121f1308..f91f364eed 100644 --- a/interface/gemv.c +++ b/interface/gemv.c @@ -63,6 +63,36 @@ static int (*gemv_thread[])(BLASLONG, BLASLONG, FLOAT, FLOAT *, BLASLONG, FLOAT }; #endif +#ifdef DYNAMIC_ARCH + extern char* gotoblas_corename(void); +#endif + +#if defined(DYNAMIC_ARCH) || defined(NEOVERSEV1) +static inline int get_gemv_optimal_nthreads_neoversev1(BLASLONG MN, int ncpu) { + return + MN < 25600L ? 
1 + : MN < 63001L ? MIN(ncpu, 4) + : MN < 459684L ? MIN(ncpu, 16) + : ncpu; +} +#endif + +static inline int get_gemv_optimal_nthreads(BLASLONG MN) { + int ncpu = num_cpu_avail(3); +#if defined(NEOVERSEV1) && !defined(COMPLEX) && !defined(DOUBLE) && !defined(BFLOAT16) + return get_gemv_optimal_nthreads_neoversev1(MN, ncpu); +#elif defined(DYNAMIC_ARCH) && !defined(COMPLEX) && !defined(DOUBLE) && !defined(BFLOAT16) + if (strcmp(gotoblas_corename(), "neoversev1") == 0) { + return get_gemv_optimal_nthreads_neoversev1(MN, ncpu); + } +#endif + + if ( MN < 115200L * GEMM_MULTITHREAD_THRESHOLD ) + return 1; + else + return num_cpu_avail(2); +} + #ifndef CBLAS void NAME(char *TRANS, blasint *M, blasint *N, @@ -225,11 +255,7 @@ void CNAME(enum CBLAS_ORDER order, STACK_ALLOC(buffer_size, FLOAT, buffer); #ifdef SMP - - if ( 1L * m * n < 115200L * GEMM_MULTITHREAD_THRESHOLD ) - nthreads = 1; - else - nthreads = num_cpu_avail(2); + nthreads = get_gemv_optimal_nthreads(1L * m * n); if (nthreads == 1) { #endif diff --git a/interface/lapack/trtri.c b/interface/lapack/trtri.c index 0285293892..df79f26656 100644 --- a/interface/lapack/trtri.c +++ b/interface/lapack/trtri.c @@ -127,6 +127,9 @@ int NAME(char *UPLO, char *DIAG, blasint *N, FLOAT *a, blasint *ldA, blasint *In #endif #ifdef SMP +if (args.n <= 150) + args.nthreads = 1; +else args.nthreads = num_cpu_avail(4); if (args.nthreads == 1) { diff --git a/interface/rotm.c b/interface/rotm.c index 9dc08354ac..9ef87da329 100644 --- a/interface/rotm.c +++ b/interface/rotm.c @@ -7,149 +7,21 @@ void NAME(blasint *N, FLOAT *dx, blasint *INCX, FLOAT *dy, blasint *INCY, FLOAT *dparam){ - blasint n = *N; - blasint incx = *INCX; - blasint incy = *INCY; + blasint n = *N; + blasint incx = *INCX; + blasint incy = *INCY; + PRINT_DEBUG_NAME #else void CNAME(blasint n, FLOAT *dx, blasint incx, FLOAT *dy, blasint incy, FLOAT *dparam){ -#endif - - blasint i__1, i__2; + PRINT_DEBUG_CNAME; - blasint i__; - FLOAT w, z__; - blasint kx, ky; - FLOAT dh11, 
dh12, dh22, dh21, dflag; - blasint nsteps; - -#ifndef CBLAS - PRINT_DEBUG_CNAME; -#else - PRINT_DEBUG_CNAME; #endif - --dparam; - --dy; - --dx; - - dflag = dparam[1]; - if (n <= 0 || dflag == - 2.0) goto L140; - - if (! (incx == incy && incx > 0)) goto L70; - - nsteps = n * incx; - if (dflag < 0.) { - goto L50; - } else if (dflag == 0) { - goto L10; - } else { - goto L30; - } -L10: - dh12 = dparam[4]; - dh21 = dparam[3]; - i__1 = nsteps; - i__2 = incx; - for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { - w = dx[i__]; - z__ = dy[i__]; - dx[i__] = w + z__ * dh12; - dy[i__] = w * dh21 + z__; -/* L20: */ - } - goto L140; -L30: - dh11 = dparam[2]; - dh22 = dparam[5]; - i__2 = nsteps; - i__1 = incx; - for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) { - w = dx[i__]; - z__ = dy[i__]; - dx[i__] = w * dh11 + z__; - dy[i__] = -w + dh22 * z__; -/* L40: */ - } - goto L140; -L50: - dh11 = dparam[2]; - dh12 = dparam[4]; - dh21 = dparam[3]; - dh22 = dparam[5]; - i__1 = nsteps; - i__2 = incx; - for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { - w = dx[i__]; - z__ = dy[i__]; - dx[i__] = w * dh11 + z__ * dh12; - dy[i__] = w * dh21 + z__ * dh22; -/* L60: */ - } - goto L140; -L70: - kx = 1; - ky = 1; - if (incx < 0) { - kx = (1 - n) * incx + 1; - } - if (incy < 0) { - ky = (1 - n) * incy + 1; - } + ROTM_K(n, dx, incx, dy, incy, dparam); - if (dflag < 0.) 
{ - goto L120; - } else if (dflag == 0) { - goto L80; - } else { - goto L100; - } -L80: - dh12 = dparam[4]; - dh21 = dparam[3]; - i__2 = n; - for (i__ = 1; i__ <= i__2; ++i__) { - w = dx[kx]; - z__ = dy[ky]; - dx[kx] = w + z__ * dh12; - dy[ky] = w * dh21 + z__; - kx += incx; - ky += incy; -/* L90: */ - } - goto L140; -L100: - dh11 = dparam[2]; - dh22 = dparam[5]; - i__2 = n; - for (i__ = 1; i__ <= i__2; ++i__) { - w = dx[kx]; - z__ = dy[ky]; - dx[kx] = w * dh11 + z__; - dy[ky] = -w + dh22 * z__; - kx += incx; - ky += incy; -/* L110: */ - } - goto L140; -L120: - dh11 = dparam[2]; - dh12 = dparam[4]; - dh21 = dparam[3]; - dh22 = dparam[5]; - i__2 = n; - for (i__ = 1; i__ <= i__2; ++i__) { - w = dx[kx]; - z__ = dy[ky]; - dx[kx] = w * dh11 + z__ * dh12; - dy[ky] = w * dh21 + z__ * dh22; - kx += incx; - ky += incy; -/* L130: */ - } -L140: return; } diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt index 74e6760c27..b43cda2c14 100644 --- a/kernel/CMakeLists.txt +++ b/kernel/CMakeLists.txt @@ -65,6 +65,7 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS) GenerateNamedObjects("${KERNELDIR}/${${float_char}COPYKERNEL}" "C_INTERFACE" "copy_k" false "" "" false ${float_type}) GenerateNamedObjects("${KERNELDIR}/${${float_char}NRM2KERNEL}" "" "nrm2_k" false "" "" false ${float_type}) GenerateNamedObjects("${KERNELDIR}/${${float_char}ROTKERNEL}" "" "rot_k" false "" "" false ${float_type}) + GenerateNamedObjects("${KERNELDIR}/${${float_char}ROTMKERNEL}" "" "rotm_k" false "" "" false ${float_type}) GenerateNamedObjects("${KERNELDIR}/${${float_char}SCALKERNEL}" "" "scal_k" false "" "" false ${float_type}) GenerateNamedObjects("${KERNELDIR}/${${float_char}SWAPKERNEL}" "" "swap_k" false "" "" false ${float_type}) GenerateNamedObjects("${KERNELDIR}/${${float_char}AXPBYKERNEL}" "" "axpby_k" false "" "" false ${float_type}) @@ -125,6 +126,7 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS) GenerateNamedObjects("${KERNELDIR}/${SNRM2KERNEL}" "" 
"nrm2_k" false "" "" false "SINGLE") GenerateNamedObjects("${KERNELDIR}/${SDOTKERNEL}" "" "dot_k" false "" "" false "SINGLE") GenerateNamedObjects("${KERNELDIR}/${SROTKERNEL}" "" "rot_k" false "" "" false "SINGLE") + GenerateNamedObjects("${KERNELDIR}/${SROTMKERNEL}" "" "rotm_k" false "" "" false "SINGLE") endif () if (BUILD_COMPLEX16 AND NOT BUILD_DOUBLE) GenerateNamedObjects("${KERNELDIR}/${DAMAXKERNEL}" "USE_ABS" "amax_k" false "" "" false "DOUBLE") @@ -148,6 +150,7 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS) GenerateNamedObjects("${KERNELDIR}/${DCOPYKERNEL}" "C_INTERFACE" "copy_k" false "" "" false "DOUBLE") GenerateNamedObjects("${KERNELDIR}/${DNRM2KERNEL}" "" "nrm2_k" false "" "" false "DOUBLE") GenerateNamedObjects("${KERNELDIR}/${DROTKERNEL}" "" "rot_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DROTMKERNEL}" "" "rotm_k" false "" "" false "DOUBLE") GenerateNamedObjects("${KERNELDIR}/${DDOTKERNEL}" "" "dot_k" false "" "" false "DOUBLE") GenerateNamedObjects("${KERNELDIR}/${DSWAPKERNEL}" "" "swap_k" false "" "" false "DOUBLE") GenerateNamedObjects("${KERNELDIR}/${DAXPYKERNEL}" "" "axpy_k" false "" "" false "DOUBLE") @@ -1105,6 +1108,7 @@ endif () GenerateNamedObjects("${KERNELDIR}/${DCOPYKERNEL}" "C_INTERFACE" "copy_k" false "" "" false "DOUBLE") GenerateNamedObjects("${KERNELDIR}/${DNRM2KERNEL}" "" "nrm2_k" false "" "" false "DOUBLE") GenerateNamedObjects("${KERNELDIR}/${DROTKERNEL}" "" "rot_k" false "" "" false "DOUBLE") + GenerateNamedObjects("${KERNELDIR}/${DROTMKERNEL}" "" "rotm_k" false "" "" false "DOUBLE") GenerateNamedObjects("${KERNELDIR}/${DDOTKERNEL}" "" "dot_k" false "" "" false "DOUBLE") GenerateNamedObjects("${KERNELDIR}/${DSWAPKERNEL}" "" "swap_k" false "" "" false "DOUBLE") GenerateNamedObjects("${KERNELDIR}/${DAXPYKERNEL}" "" "axpy_k" false "" "" false "DOUBLE") diff --git a/kernel/Makefile.L1 b/kernel/Makefile.L1 index 09337363da..0fc6720944 100644 --- a/kernel/Makefile.L1 +++ 
b/kernel/Makefile.L1 @@ -336,6 +336,18 @@ ifndef XROTKERNEL XROTKERNEL = zrot.S endif +ifndef SROTMKERNEL +SROTMKERNEL = rotm.S +endif + +ifndef DROTMKERNEL +DROTMKERNEL = rotm.S +endif + +ifndef QROTMKERNEL +QROTMKERNEL = rotm.S +endif + ### SCAL ### ifndef SSCALKERNEL @@ -504,21 +516,21 @@ SBLASOBJS += \ sasum_k$(TSUFFIX).$(SUFFIX) ssum_k$(TSUFFIX).$(SUFFIX) saxpy_k$(TSUFFIX).$(SUFFIX) scopy_k$(TSUFFIX).$(SUFFIX) \ sdot_k$(TSUFFIX).$(SUFFIX) sdsdot_k$(TSUFFIX).$(SUFFIX) dsdot_k$(TSUFFIX).$(SUFFIX) \ snrm2_k$(TSUFFIX).$(SUFFIX) srot_k$(TSUFFIX).$(SUFFIX) sscal_k$(TSUFFIX).$(SUFFIX) sswap_k$(TSUFFIX).$(SUFFIX) \ - saxpby_k$(TSUFFIX).$(SUFFIX) + saxpby_k$(TSUFFIX).$(SUFFIX) srotm_k$(TSUFFIX).$(SUFFIX) DBLASOBJS += \ damax_k$(TSUFFIX).$(SUFFIX) damin_k$(TSUFFIX).$(SUFFIX) dmax_k$(TSUFFIX).$(SUFFIX) dmin_k$(TSUFFIX).$(SUFFIX) \ idamax_k$(TSUFFIX).$(SUFFIX) idamin_k$(TSUFFIX).$(SUFFIX) idmax_k$(TSUFFIX).$(SUFFIX) idmin_k$(TSUFFIX).$(SUFFIX) \ dasum_k$(TSUFFIX).$(SUFFIX) daxpy_k$(TSUFFIX).$(SUFFIX) dcopy_k$(TSUFFIX).$(SUFFIX) ddot_k$(TSUFFIX).$(SUFFIX) \ dnrm2_k$(TSUFFIX).$(SUFFIX) drot_k$(TSUFFIX).$(SUFFIX) dscal_k$(TSUFFIX).$(SUFFIX) dswap_k$(TSUFFIX).$(SUFFIX) \ - daxpby_k$(TSUFFIX).$(SUFFIX) dsum_k$(TSUFFIX).$(SUFFIX) + daxpby_k$(TSUFFIX).$(SUFFIX) dsum_k$(TSUFFIX).$(SUFFIX) drotm_k$(TSUFFIX).$(SUFFIX) QBLASOBJS += \ qamax_k$(TSUFFIX).$(SUFFIX) qamin_k$(TSUFFIX).$(SUFFIX) qmax_k$(TSUFFIX).$(SUFFIX) qmin_k$(TSUFFIX).$(SUFFIX) \ iqamax_k$(TSUFFIX).$(SUFFIX) iqamin_k$(TSUFFIX).$(SUFFIX) iqmax_k$(TSUFFIX).$(SUFFIX) iqmin_k$(TSUFFIX).$(SUFFIX) \ qasum_k$(TSUFFIX).$(SUFFIX) qaxpy_k$(TSUFFIX).$(SUFFIX) qcopy_k$(TSUFFIX).$(SUFFIX) qdot_k$(TSUFFIX).$(SUFFIX) \ qnrm2_k$(TSUFFIX).$(SUFFIX) qrot_k$(TSUFFIX).$(SUFFIX) qscal_k$(TSUFFIX).$(SUFFIX) qswap_k$(TSUFFIX).$(SUFFIX) \ - qsum_k$(TSUFFIX).$(SUFFIX) + qsum_k$(TSUFFIX).$(SUFFIX) qrotm_k$(TSUFFIX).$(SUFFIX) CBLASOBJS += \ camax_k$(TSUFFIX).$(SUFFIX) camin_k$(TSUFFIX).$(SUFFIX) icamax_k$(TSUFFIX).$(SUFFIX) 
icamin_k$(TSUFFIX).$(SUFFIX) \ @@ -842,7 +854,16 @@ $(KDIR)drot_k$(TSUFFIX).$(SUFFIX) $(KDIR)drot_k$(TPSUFFIX).$(PSUFFIX) : $(KERN $(CC) -c $(CFLAGS) $(FMAFLAG) -UCOMPLEX -UCOMPLEX -DDOUBLE $< -o $@ $(KDIR)qrot_k$(TSUFFIX).$(SUFFIX) $(KDIR)qrot_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(QROTKERNEL) - $(CC) -c $(CFLAGS) -UCOMPLEX -UCOMPLEX -DXDOUBLE $< -o $@ + $(CC) -c $(CFLAGS) $(FMAFLAG) -UCOMPLEX -UCOMPLEX -DXDOUBLE $< -o $@ + +$(KDIR)srotm_k$(TSUFFIX).$(SUFFIX) $(KDIR)srotm_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(SROTMKERNEL) + $(CC) -c $(CFLAGS) $(FMAFLAG) -UCOMPLEX -UCOMPLEX -UDOUBLE $< -o $@ + +$(KDIR)drotm_k$(TSUFFIX).$(SUFFIX) $(KDIR)drotm_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(DROTMKERNEL) + $(CC) -c $(CFLAGS) $(FMAFLAG) -UCOMPLEX -UCOMPLEX -DDOUBLE $< -o $@ + +$(KDIR)qrotm_k$(TSUFFIX).$(SUFFIX) $(KDIR)qrotm_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(QROTMKERNEL) + $(CC) -c $(CFLAGS) $(FMAFLAG) -UCOMPLEX -UCOMPLEX -DXDOUBLE $< -o $@ $(KDIR)csrot_k$(TSUFFIX).$(SUFFIX) $(KDIR)csrot_k$(TPSUFFIX).$(PSUFFIX) : $(KERNELDIR)/$(CROTKERNEL) $(CC) -c $(CFLAGS) -DCOMPLEX -DCOMPLEX -UDOUBLE $< -o $@ diff --git a/kernel/alpha/KERNEL b/kernel/alpha/KERNEL index 01734bf9c5..42ae595aa9 100644 --- a/kernel/alpha/KERNEL +++ b/kernel/alpha/KERNEL @@ -122,3 +122,15 @@ ZTRSMKERNEL_LN = ztrsm_kernel_2x2_LN.S ZTRSMKERNEL_LT = ztrsm_kernel_2x2_LT.S ZTRSMKERNEL_RN = ztrsm_kernel_2x2_LT.S ZTRSMKERNEL_RT = ztrsm_kernel_2x2_RT.S + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/arm/KERNEL b/kernel/arm/KERNEL index aeccfbf4c8..a6ad0bf028 100644 --- a/kernel/arm/KERNEL +++ b/kernel/arm/KERNEL @@ -43,4 +43,14 @@ ifndef ZGEMM_BETA ZGEMM_BETA = ../generic/zgemm_beta.c endif +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif +ifndef QROTMKERNEL +QROTMKERNEL = 
../generic/rotm.c +endif diff --git a/kernel/arm64/KERNEL b/kernel/arm64/KERNEL index 7d7e648c48..05d95683dc 100644 --- a/kernel/arm64/KERNEL +++ b/kernel/arm64/KERNEL @@ -45,4 +45,14 @@ ifndef ZGEMM_BETA ZGEMM_BETA = ../generic/zgemm_beta.c endif +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/arm64/KERNEL.A64FX b/kernel/arm64/KERNEL.A64FX index 4abc840405..75f0f39a7e 100644 --- a/kernel/arm64/KERNEL.A64FX +++ b/kernel/arm64/KERNEL.A64FX @@ -2,5 +2,5 @@ include $(KERNELDIR)/KERNEL.ARMV8SVE SGEMVNKERNEL = gemv_n_sve.c DGEMVNKERNEL = gemv_n_sve.c -SGEMVTKERNEL = gemv_t_sve.c -DGEMVTKERNEL = gemv_t_sve.c +SGEMVTKERNEL = gemv_t_sve_v4x3.c +DGEMVTKERNEL = gemv_t_sve_v4x3.c diff --git a/kernel/arm64/KERNEL.ARMV8SVE b/kernel/arm64/KERNEL.ARMV8SVE index bfadf5cba9..dc58e329fc 100644 --- a/kernel/arm64/KERNEL.ARMV8SVE +++ b/kernel/arm64/KERNEL.ARMV8SVE @@ -64,8 +64,8 @@ DAXPYKERNEL = daxpy_thunderx2t99.S CAXPYKERNEL = zaxpy.S ZAXPYKERNEL = zaxpy.S -SROTKERNEL = rot.S -DROTKERNEL = rot.S +SROTKERNEL = rot.c +DROTKERNEL = rot.c CROTKERNEL = zrot.S ZROTKERNEL = zrot.S @@ -94,8 +94,8 @@ DCOPYKERNEL = copy_thunderx2t99.c CCOPYKERNEL = copy_thunderx2t99.c ZCOPYKERNEL = copy_thunderx2t99.c -SSWAPKERNEL = swap_thunderx2t99.S -DSWAPKERNEL = swap_thunderx2t99.S +SSWAPKERNEL = swap.c +DSWAPKERNEL = swap.c CSWAPKERNEL = swap_thunderx2t99.S ZSWAPKERNEL = swap_thunderx2t99.S @@ -104,10 +104,10 @@ IDAMAXKERNEL = iamax_thunderx2t99.c ICAMAXKERNEL = izamax_thunderx2t99.c IZAMAXKERNEL = izamax_thunderx2t99.c -SNRM2KERNEL = scnrm2_thunderx2t99.c -DNRM2KERNEL = dznrm2_thunderx2t99.c -CNRM2KERNEL = scnrm2_thunderx2t99.c -ZNRM2KERNEL = dznrm2_thunderx2t99.c +SNRM2KERNEL = nrm2.S +DNRM2KERNEL = nrm2.S +CNRM2KERNEL = znrm2.S +ZNRM2KERNEL = znrm2.S DDOTKERNEL = dot.c SDOTKERNEL = dot.c diff --git a/kernel/arm64/KERNEL.NEOVERSEN1 
b/kernel/arm64/KERNEL.NEOVERSEN1 index 5b31744730..e623814d6a 100644 --- a/kernel/arm64/KERNEL.NEOVERSEN1 +++ b/kernel/arm64/KERNEL.NEOVERSEN1 @@ -98,8 +98,18 @@ ZNRM2KERNEL = znrm2.S DDOTKERNEL = dot.c SDOTKERNEL = dot.c +ifeq ($(OSNAME), WINNT) +ifeq ($(C_COMPILER), CLANG) +CDOTKERNEL = zdot.S +ZDOTKERNEL = zdot.S +else +CDOTKERNEL = zdot_thunderx2t99.c +ZDOTKERNEL = zdot_thunderx2t99.c +endif +else CDOTKERNEL = zdot_thunderx2t99.c ZDOTKERNEL = zdot_thunderx2t99.c +endif DSDOTKERNEL = dot.S DGEMM_BETA = dgemm_beta.S diff --git a/kernel/arm64/KERNEL.NEOVERSEN2 b/kernel/arm64/KERNEL.NEOVERSEN2 index cabacad46e..2f7400113b 100644 --- a/kernel/arm64/KERNEL.NEOVERSEN2 +++ b/kernel/arm64/KERNEL.NEOVERSEN2 @@ -91,10 +91,10 @@ IDAMAXKERNEL = iamax_thunderx2t99.c ICAMAXKERNEL = izamax_thunderx2t99.c IZAMAXKERNEL = izamax_thunderx2t99.c -SNRM2KERNEL = scnrm2_thunderx2t99.c -DNRM2KERNEL = dznrm2_thunderx2t99.c -CNRM2KERNEL = scnrm2_thunderx2t99.c -ZNRM2KERNEL = dznrm2_thunderx2t99.c +SNRM2KERNEL = nrm2.S +DNRM2KERNEL = nrm2.S +CNRM2KERNEL = znrm2.S +ZNRM2KERNEL = znrm2.S DDOTKERNEL = dot.c SDOTKERNEL = dot.c diff --git a/kernel/arm64/KERNEL.NEOVERSEV1 b/kernel/arm64/KERNEL.NEOVERSEV1 index 53d157a0aa..859466409e 100644 --- a/kernel/arm64/KERNEL.NEOVERSEV1 +++ b/kernel/arm64/KERNEL.NEOVERSEV1 @@ -1,4 +1,4 @@ include $(KERNELDIR)/KERNEL.ARMV8SVE -SGEMVTKERNEL = gemv_t_sve.c -DGEMVTKERNEL = gemv_t_sve.c +SGEMVTKERNEL = gemv_t_sve_v1x3.c +DGEMVTKERNEL = gemv_t_sve_v1x3.c diff --git a/kernel/arm64/KERNEL.generic b/kernel/arm64/KERNEL.generic index 838adb05ab..65c301e686 100644 --- a/kernel/arm64/KERNEL.generic +++ b/kernel/arm64/KERNEL.generic @@ -171,3 +171,15 @@ QCABS_KERNEL = ../generic/cabs.c #Dump kernel CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c 
+endif diff --git a/kernel/arm64/copy_thunderx2t99.c b/kernel/arm64/copy_thunderx2t99.c index e318761391..263cc30130 100644 --- a/kernel/arm64/copy_thunderx2t99.c +++ b/kernel/arm64/copy_thunderx2t99.c @@ -1,216 +1,217 @@ -/*************************************************************************** -Copyright (c) 2017, The OpenBLAS Project -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in -the documentation and/or other materials provided with the -distribution. -3. Neither the name of the OpenBLAS project nor the names of -its contributors may be used to endorse or promote products -derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*****************************************************************************/ - -#include "common.h" - -#include -#define N "x0" /* vector length */ -#define X "x1" /* X vector address */ -#define INC_X "x2" /* X stride */ -#define Y "x3" /* Y vector address */ -#define INC_Y "x4" /* Y stride */ -#define J "x5" /* loop variable */ - -/******************************************************************************* -* Macro definitions -*******************************************************************************/ -#if !defined(COMPLEX) -#if !defined(DOUBLE) -#define TMPF "s0" -#define INC_SHIFT "2" -#define N_DIV_SHIFT "2" -#define N_REM_MASK "3" -#else -#define TMPF "d0" -#define INC_SHIFT "3" -#define N_DIV_SHIFT "1" -#define N_REM_MASK "1" -#endif -#else -#if !defined(DOUBLE) -#define TMPF "d0" -#define INC_SHIFT "3" -#define N_DIV_SHIFT "1" -#define N_REM_MASK "1" -#else -#define TMPF "q0" -#define INC_SHIFT "4" -#define N_DIV_SHIFT "0" -#define N_REM_MASK "0" -#endif -#endif - -#define KERNEL_F1 \ - "ldr "TMPF", ["X"] \n" \ - "add "X", "X", "INC_X" \n" \ - "str "TMPF", ["Y"] \n" \ - "add "Y", "Y", "INC_Y" \n" - -#define KERNEL_F \ - "ldr q0, ["X"], #16 \n" \ - "str q0, ["Y"], #16 \n" - -#define INIT \ - "lsl "INC_X", "INC_X", #"INC_SHIFT" \n" \ - "lsl "INC_Y", "INC_Y", #"INC_SHIFT" \n" - - -static int do_copy(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) -{ - if ( n < 0 ) return 0; - - __asm__ __volatile__ ( - " mov "N", %[N_] \n" - " mov "X", %[X_] \n" - " mov "INC_X", %[INCX_] \n" - " mov "Y", %[Y_] \n" - " mov "INC_Y", %[INCY_] \n" - " cmp "N", xzr \n" - " ble 8f //copy_kernel_L999 \n" - " cmp "INC_X", #1 \n" - " bne 4f //copy_kernel_S_BEGIN \n" - " cmp "INC_Y", #1 \n" - " bne 4f //copy_kernel_S_BEGIN \n" - - "// .Lcopy_kernel_F_BEGIN: \n" - " "INIT" \n" - " asr "J", "N", #"N_DIV_SHIFT" \n" - " cmp "J", xzr \n" - " beq 2f //copy_kernel_F1 \n" - " .align 5 \n" - - "1: //copy_kernel_F: \n" - " "KERNEL_F" \n" - " subs "J", "J", #1 \n" - " 
bne 1b //copy_kernel_F \n" - - "2: //copy_kernel_F1: \n" -#if defined(COMPLEX) && defined(DOUBLE) - " b 8f //copy_kernel_L999 \n" -#else - " ands "J", "N", #"N_REM_MASK" \n" - " ble 8f //copy_kernel_L999 \n" -#endif - - "3: //copy_kernel_F10: \n" - " "KERNEL_F1" \n" - " subs "J", "J", #1 \n" - " bne 3b //copy_kernel_F10 \n" - " b 8f //copy_kernel_L999 \n" - - "4: //copy_kernel_S_BEGIN: \n" - " "INIT" \n" - " asr "J", "N", #2 \n" - " cmp "J", xzr \n" - " ble 6f //copy_kernel_S1 \n" - - "5: //copy_kernel_S4: \n" - " "KERNEL_F1" \n" - " "KERNEL_F1" \n" - " "KERNEL_F1" \n" - " "KERNEL_F1" \n" - " subs "J", "J", #1 \n" - " bne 5b //copy_kernel_S4 \n" - - "6: //copy_kernel_S1: \n" - " ands "J", "N", #3 \n" - " ble 8f //copy_kernel_L999 \n" - - "7: //copy_kernel_S10: \n" - " "KERNEL_F1" \n" - " subs "J", "J", #1 \n" - " bne 7b //copy_kernel_S10 \n" - - "8: //copy_kernel_L999: \n" - - : - : [N_] "r" (n), //%1 - [X_] "r" (x), //%2 - [INCX_] "r" (inc_x), //%3 - [Y_] "r" (y), //%4 - [INCY_] "r" (inc_y) //%5 - : "cc", - "memory", - "x0", "x1", "x2", "x3", "x4", "x5", - "d0" - ); - - return 0; -} - -#if defined(SMP) -static int copy_thread_function(BLASLONG n, BLASLONG dummy0, - BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, - BLASLONG inc_y, FLOAT *dummy3, BLASLONG dummy4) -{ - do_copy(n, x, inc_x, y, inc_y); - - return 0; -} -#endif - -int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) -{ -#if defined(SMP) - int nthreads; - FLOAT dummy_alpha; -#endif - - if (n <= 0) return 0; - -#if defined(SMP) - if (inc_x == 0 || n <= 10000) - nthreads = 1; - else - nthreads = num_cpu_avail(1); - - if (nthreads == 1) { - do_copy(n, x, inc_x, y, inc_y); - } else { - int mode = 0; - -#if !defined(COMPLEX) - mode = BLAS_REAL; -#else - mode = BLAS_COMPLEX; -#endif -#if !defined(DOUBLE) - mode |= BLAS_SINGLE; -#else - mode |= BLAS_DOUBLE; -#endif - - blas_level1_thread(mode, n, 0, 0, &dummy_alpha, - x, inc_x, y, inc_y, NULL, 0, - ( void 
*)copy_thread_function, nthreads); - } -#else - do_copy(n, x, inc_x, y, inc_y); -#endif - - return 0; -} +/*************************************************************************** +Copyright (c) 2017, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "common.h" + +#include +#define N "x0" /* vector length */ +#define X "x1" /* X vector address */ +#define INC_X "x2" /* X stride */ +#define Y "x3" /* Y vector address */ +#define INC_Y "x4" /* Y stride */ +#define J "x5" /* loop variable */ + +/******************************************************************************* +* Macro definitions +*******************************************************************************/ +#if !defined(COMPLEX) +#if !defined(DOUBLE) +#define TMPF "s0" +#define INC_SHIFT "2" +#define N_DIV_SHIFT "2" +#define N_REM_MASK "3" +#else +#define TMPF "d0" +#define INC_SHIFT "3" +#define N_DIV_SHIFT "1" +#define N_REM_MASK "1" +#endif +#else +#if !defined(DOUBLE) +#define TMPF "d0" +#define INC_SHIFT "3" +#define N_DIV_SHIFT "1" +#define N_REM_MASK "1" +#else +#define TMPF "q0" +#define INC_SHIFT "4" +#define N_DIV_SHIFT "0" +#define N_REM_MASK "0" +#endif +#endif + +#define KERNEL_F1 \ + "ldr "TMPF", ["X"] \n" \ + "add "X", "X", "INC_X" \n" \ + "str "TMPF", ["Y"] \n" \ + "add "Y", "Y", "INC_Y" \n" + +#define KERNEL_F \ + "ldr q0, ["X"], #16 \n" \ + "str q0, ["Y"], #16 \n" + +#define INIT \ + "lsl "INC_X", "INC_X", #"INC_SHIFT" \n" \ + "lsl "INC_Y", "INC_Y", #"INC_SHIFT" \n" + + +static int do_copy(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +{ + if ( n < 0 ) return 0; + + __asm__ __volatile__ ( + " mov "N", %[N_] \n" + " mov "X", %[X_] \n" + " mov "INC_X", %[INCX_] \n" + " mov "Y", %[Y_] \n" + " mov "INC_Y", %[INCY_] \n" + " cmp "N", xzr \n" + " ble 8f //copy_kernel_L999 \n" + " cmp "INC_X", #1 \n" + " bne 4f //copy_kernel_S_BEGIN \n" + " cmp "INC_Y", #1 \n" + " bne 4f //copy_kernel_S_BEGIN \n" + + "// .Lcopy_kernel_F_BEGIN: \n" + " "INIT" \n" + " asr "J", "N", #"N_DIV_SHIFT" \n" + " cmp "J", xzr \n" + " beq 2f //copy_kernel_F1 \n" +#if !(defined(__clang__) && defined(OS_WINDOWS)) + " .align 5 \n" +#endif + "1: 
//copy_kernel_F: \n" + " "KERNEL_F" \n" + " subs "J", "J", #1 \n" + " bne 1b //copy_kernel_F \n" + + "2: //copy_kernel_F1: \n" +#if defined(COMPLEX) && defined(DOUBLE) + " b 8f //copy_kernel_L999 \n" +#else + " ands "J", "N", #"N_REM_MASK" \n" + " ble 8f //copy_kernel_L999 \n" +#endif + + "3: //copy_kernel_F10: \n" + " "KERNEL_F1" \n" + " subs "J", "J", #1 \n" + " bne 3b //copy_kernel_F10 \n" + " b 8f //copy_kernel_L999 \n" + + "4: //copy_kernel_S_BEGIN: \n" + " "INIT" \n" + " asr "J", "N", #2 \n" + " cmp "J", xzr \n" + " ble 6f //copy_kernel_S1 \n" + + "5: //copy_kernel_S4: \n" + " "KERNEL_F1" \n" + " "KERNEL_F1" \n" + " "KERNEL_F1" \n" + " "KERNEL_F1" \n" + " subs "J", "J", #1 \n" + " bne 5b //copy_kernel_S4 \n" + + "6: //copy_kernel_S1: \n" + " ands "J", "N", #3 \n" + " ble 8f //copy_kernel_L999 \n" + + "7: //copy_kernel_S10: \n" + " "KERNEL_F1" \n" + " subs "J", "J", #1 \n" + " bne 7b //copy_kernel_S10 \n" + + "8: //copy_kernel_L999: \n" + + : + : [N_] "r" (n), //%1 + [X_] "r" (x), //%2 + [INCX_] "r" (inc_x), //%3 + [Y_] "r" (y), //%4 + [INCY_] "r" (inc_y) //%5 + : "cc", + "memory", + "x0", "x1", "x2", "x3", "x4", "x5", + "d0" + ); + + return 0; +} + +#if defined(SMP) +static int copy_thread_function(BLASLONG n, BLASLONG dummy0, + BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, + BLASLONG inc_y, FLOAT *dummy3, BLASLONG dummy4) +{ + do_copy(n, x, inc_x, y, inc_y); + + return 0; +} +#endif + +int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +{ +#if defined(SMP) + int nthreads; + FLOAT dummy_alpha; +#endif + + if (n <= 0) return 0; + +#if defined(SMP) + if (inc_x == 0 || n <= 10000) + nthreads = 1; + else + nthreads = num_cpu_avail(1); + + if (nthreads == 1) { + do_copy(n, x, inc_x, y, inc_y); + } else { + int mode = 0; + +#if !defined(COMPLEX) + mode = BLAS_REAL; +#else + mode = BLAS_COMPLEX; +#endif +#if !defined(DOUBLE) + mode |= BLAS_SINGLE; +#else + mode |= BLAS_DOUBLE; +#endif + + blas_level1_thread(mode, n, 0, 0, 
&dummy_alpha, + x, inc_x, y, inc_y, NULL, 0, + ( void *)copy_thread_function, nthreads); + } +#else + do_copy(n, x, inc_x, y, inc_y); +#endif + + return 0; +} diff --git a/kernel/arm64/dasum_thunderx2t99.c b/kernel/arm64/dasum_thunderx2t99.c index a212c9534b..b554f0a9b1 100644 --- a/kernel/arm64/dasum_thunderx2t99.c +++ b/kernel/arm64/dasum_thunderx2t99.c @@ -152,7 +152,9 @@ static FLOAT dasum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) " cmp "J", xzr \n" " beq 3f //asum_kernel_F1 \n" +#if !(defined(__clang__) && defined(OS_WINDOWS)) ".align 5 \n" +#endif "2: //asum_kernel_F32: \n" " "KERNEL_F32" \n" " subs "J", "J", #1 \n" diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c index 2ef23d7ee4..8419e50655 100644 --- a/kernel/arm64/dgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c @@ -213,7 +213,7 @@ CNAME(BLASLONG M, const BLASLONG n2 = N & -2; const BLASLONG n8 = N & -8; - const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size2 && N >= 8 ? 1 : 0; FLOAT* packed_a = (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; diff --git a/kernel/arm64/dgemm_small_kernel_tt_sve.c b/kernel/arm64/dgemm_small_kernel_tt_sve.c index efe11a9f9b..0f06b4ecbd 100644 --- a/kernel/arm64/dgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tt_sve.c @@ -219,7 +219,7 @@ CNAME(BLASLONG M, const BLASLONG n4 = N & -4; const BLASLONG n2 = N & -2; - const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size2 && N >= 8 ? 1 : 0; FLOAT* packed_a = (pack_a) ? 
packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; diff --git a/kernel/arm64/dot_kernel_asimd.c b/kernel/arm64/dot_kernel_asimd.c index 1288838f87..a404c96368 100644 --- a/kernel/arm64/dot_kernel_asimd.c +++ b/kernel/arm64/dot_kernel_asimd.c @@ -285,8 +285,9 @@ static RETURN_TYPE dot_kernel_asimd(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT " asr %[J_], %[N_], #"N_DIV_SHIFT" \n" " cmp %[J_], xzr \n" " beq 3f //dot_kernel_F1 \n" - +#if !(defined(__clang__) && defined(OS_WINDOWS)) " .align 5 \n" +#endif "2: //dot_kernel_F: \n" " "KERNEL_F" \n" " subs %[J_], %[J_], #1 \n" diff --git a/kernel/arm64/gemv_t_sve_v1x3.c b/kernel/arm64/gemv_t_sve_v1x3.c new file mode 100644 index 0000000000..bcd0de0bf1 --- /dev/null +++ b/kernel/arm64/gemv_t_sve_v1x3.c @@ -0,0 +1,168 @@ +/*************************************************************************** +Copyright (c) 2024, 2025 The OpenBLAS Project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include + +#include "common.h" + +#ifdef DOUBLE +#define SV_COUNT svcntd +#define SV_TYPE svfloat64_t +#define SV_TRUE svptrue_b64 +#define SV_WHILE svwhilelt_b64_s64 +#define SV_DUP svdup_f64 +#else +#define SV_COUNT svcntw +#define SV_TYPE svfloat32_t +#define SV_TRUE svptrue_b32 +#define SV_WHILE svwhilelt_b32_s64 +#define SV_DUP svdup_f32 +#endif + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, + BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, + FLOAT *buffer) +{ + BLASLONG i; + BLASLONG ix,iy; + BLASLONG j; + FLOAT *a_ptr; + FLOAT *y_ptr; + FLOAT temp; + + iy = 0; + + if (inc_x == 1) { + BLASLONG width = n / 3; + BLASLONG sve_size = SV_COUNT(); + svbool_t pg_true = SV_TRUE(); + svbool_t pg = SV_WHILE(0, m % sve_size); + + FLOAT *a0_ptr = a + lda * width * 0; + FLOAT *a1_ptr = a + lda * width * 1; + FLOAT *a2_ptr = a + lda * width * 2; + + FLOAT *y0_ptr = y + inc_y * width * 0; + FLOAT *y1_ptr = y + inc_y * width * 1; + FLOAT *y2_ptr = y + inc_y * width * 2; + + for (j = 0; j < width; j++) { + SV_TYPE temp00_vec = SV_DUP(0.0); + SV_TYPE temp01_vec = SV_DUP(0.0); + SV_TYPE temp02_vec = SV_DUP(0.0); + + i = 0; + while ((i + sve_size * 1 - 1) < m) { + SV_TYPE x0_vec = svld1(pg_true, x + i); + + SV_TYPE a00_vec = svld1(pg_true, a0_ptr + i); + SV_TYPE a01_vec = svld1(pg_true, a1_ptr + i); + SV_TYPE a02_vec = svld1(pg_true, 
a2_ptr + i); + + temp00_vec = svmla_x(pg_true, temp00_vec, a00_vec, x0_vec); + temp01_vec = svmla_x(pg_true, temp01_vec, a01_vec, x0_vec); + temp02_vec = svmla_x(pg_true, temp02_vec, a02_vec, x0_vec); + + i += sve_size * 1; + } + + if (i < m) { + SV_TYPE x0_vec = svld1(pg, x + i); + + SV_TYPE a00_vec = svld1(pg, a0_ptr + i); + SV_TYPE a01_vec = svld1(pg, a1_ptr + i); + SV_TYPE a02_vec = svld1(pg, a2_ptr + i); + + temp00_vec = svmla_m(pg, temp00_vec, a00_vec, x0_vec); + temp01_vec = svmla_m(pg, temp01_vec, a01_vec, x0_vec); + temp02_vec = svmla_m(pg, temp02_vec, a02_vec, x0_vec); + } + + y0_ptr[iy] += alpha * svaddv(pg_true, temp00_vec); + y1_ptr[iy] += alpha * svaddv(pg_true, temp01_vec); + y2_ptr[iy] += alpha * svaddv(pg_true, temp02_vec); + + iy += inc_y; + + a0_ptr += lda; + a1_ptr += lda; + a2_ptr += lda; + } + + a_ptr = a2_ptr; + y_ptr = y2_ptr; + for (j = width * 3; j < n; j++) { + SV_TYPE temp_vec = SV_DUP(0.0); + + i = 0; + while ((i + sve_size * 1 - 1) < m) { + SV_TYPE x_vec = svld1(pg_true, x + i); + + SV_TYPE a_vec = svld1(pg_true, a_ptr + i); + + temp_vec = svmla_x(pg_true, temp_vec, a_vec, x_vec); + + i += sve_size * 1; + } + + if (i < m) { + SV_TYPE x_vec = svld1(pg, x + i); + + SV_TYPE a_vec = svld1(pg, a_ptr + i); + + temp_vec = svmla_m(pg, temp_vec, a_vec, x_vec); + } + + y_ptr[iy] += alpha * svaddv(pg_true, temp_vec); + + iy += inc_y; + + a_ptr += lda; + } + + return(0); + } + + a_ptr = a; + for (j = 0; j < n; j++) { + temp = 0.0; + ix = 0; + for (i = 0; i < m; i++) { + temp += a_ptr[i] * x[ix]; + ix += inc_x; + } + y[iy] += alpha * temp; + iy += inc_y; + a_ptr += lda; + } + return(0); +} diff --git a/kernel/arm64/gemv_t_sve_v4x3.c b/kernel/arm64/gemv_t_sve_v4x3.c new file mode 100644 index 0000000000..77c46feb34 --- /dev/null +++ b/kernel/arm64/gemv_t_sve_v4x3.c @@ -0,0 +1,234 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include + +#include "common.h" + +#ifdef DOUBLE +#define SV_COUNT svcntd +#define SV_TYPE svfloat64_t +#define SV_TRUE svptrue_b64 +#define SV_WHILE svwhilelt_b64_s64 +#define SV_DUP svdup_f64 +#else +#define SV_COUNT svcntw +#define SV_TYPE svfloat32_t +#define SV_TRUE svptrue_b32 +#define SV_WHILE svwhilelt_b32_s64 +#define SV_DUP svdup_f32 +#endif + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, + BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, + FLOAT *buffer) +{ + BLASLONG i; + BLASLONG ix,iy; + BLASLONG j; + FLOAT *a_ptr; + FLOAT temp; + + iy = 0; + + if (inc_x == 1) { + BLASLONG width = (n + 3 - 1) / 3; + + FLOAT *a0_ptr = a + lda * width * 0; + FLOAT *a1_ptr = a + lda * width * 1; + FLOAT *a2_ptr = a + lda * width * 2; + + FLOAT *y0_ptr = y + inc_y * width * 0; + FLOAT *y1_ptr = y + inc_y * width * 1; + FLOAT *y2_ptr = y + inc_y * width * 2; + + for (j = 0; j < width; j++) { + svbool_t pg00 = ((j + width * 0) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg10 = ((j + width * 0) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg20 = ((j + width * 0) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg30 = ((j + width * 0) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg01 = ((j + width * 1) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg11 = ((j + width * 1) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg21 = ((j + width * 1) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg31 = ((j + width * 1) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg02 = ((j + width * 2) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg12 = ((j + width * 2) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg22 = ((j + width * 2) < n) ? SV_TRUE() : svpfalse(); + svbool_t pg32 = ((j + width * 2) < n) ? 
SV_TRUE() : svpfalse(); + + SV_TYPE temp00_vec = SV_DUP(0.0); + SV_TYPE temp10_vec = SV_DUP(0.0); + SV_TYPE temp20_vec = SV_DUP(0.0); + SV_TYPE temp30_vec = SV_DUP(0.0); + SV_TYPE temp01_vec = SV_DUP(0.0); + SV_TYPE temp11_vec = SV_DUP(0.0); + SV_TYPE temp21_vec = SV_DUP(0.0); + SV_TYPE temp31_vec = SV_DUP(0.0); + SV_TYPE temp02_vec = SV_DUP(0.0); + SV_TYPE temp12_vec = SV_DUP(0.0); + SV_TYPE temp22_vec = SV_DUP(0.0); + SV_TYPE temp32_vec = SV_DUP(0.0); + + i = 0; + BLASLONG sve_size = SV_COUNT(); + while ((i + sve_size * 4 - 1) < m) { + SV_TYPE x0_vec = svld1_vnum(SV_TRUE(), x + i, 0); + SV_TYPE x1_vec = svld1_vnum(SV_TRUE(), x + i, 1); + SV_TYPE x2_vec = svld1_vnum(SV_TRUE(), x + i, 2); + SV_TYPE x3_vec = svld1_vnum(SV_TRUE(), x + i, 3); + + SV_TYPE a00_vec = svld1_vnum(pg00, a0_ptr + i, 0); + SV_TYPE a10_vec = svld1_vnum(pg10, a0_ptr + i, 1); + SV_TYPE a20_vec = svld1_vnum(pg20, a0_ptr + i, 2); + SV_TYPE a30_vec = svld1_vnum(pg30, a0_ptr + i, 3); + SV_TYPE a01_vec = svld1_vnum(pg01, a1_ptr + i, 0); + SV_TYPE a11_vec = svld1_vnum(pg11, a1_ptr + i, 1); + SV_TYPE a21_vec = svld1_vnum(pg21, a1_ptr + i, 2); + SV_TYPE a31_vec = svld1_vnum(pg31, a1_ptr + i, 3); + SV_TYPE a02_vec = svld1_vnum(pg02, a2_ptr + i, 0); + SV_TYPE a12_vec = svld1_vnum(pg12, a2_ptr + i, 1); + SV_TYPE a22_vec = svld1_vnum(pg22, a2_ptr + i, 2); + SV_TYPE a32_vec = svld1_vnum(pg32, a2_ptr + i, 3); + + temp00_vec = svmla_m(pg00, temp00_vec, a00_vec, x0_vec); + temp10_vec = svmla_m(pg10, temp10_vec, a10_vec, x1_vec); + temp20_vec = svmla_m(pg20, temp20_vec, a20_vec, x2_vec); + temp30_vec = svmla_m(pg30, temp30_vec, a30_vec, x3_vec); + temp01_vec = svmla_m(pg01, temp01_vec, a01_vec, x0_vec); + temp11_vec = svmla_m(pg11, temp11_vec, a11_vec, x1_vec); + temp21_vec = svmla_m(pg21, temp21_vec, a21_vec, x2_vec); + temp31_vec = svmla_m(pg31, temp31_vec, a31_vec, x3_vec); + temp02_vec = svmla_m(pg02, temp02_vec, a02_vec, x0_vec); + temp12_vec = svmla_m(pg12, temp12_vec, a12_vec, x1_vec); + temp22_vec = 
svmla_m(pg22, temp22_vec, a22_vec, x2_vec); + temp32_vec = svmla_m(pg32, temp32_vec, a32_vec, x3_vec); + + i += sve_size * 4; + } + + if (i < m) { + svbool_t pg0 = SV_WHILE(i + sve_size * 0, m); + svbool_t pg1 = SV_WHILE(i + sve_size * 1, m); + svbool_t pg2 = SV_WHILE(i + sve_size * 2, m); + svbool_t pg3 = SV_WHILE(i + sve_size * 3, m); + + pg00 = svand_z(SV_TRUE(), pg0, pg00); + pg10 = svand_z(SV_TRUE(), pg1, pg10); + pg20 = svand_z(SV_TRUE(), pg2, pg20); + pg30 = svand_z(SV_TRUE(), pg3, pg30); + pg01 = svand_z(SV_TRUE(), pg0, pg01); + pg11 = svand_z(SV_TRUE(), pg1, pg11); + pg21 = svand_z(SV_TRUE(), pg2, pg21); + pg31 = svand_z(SV_TRUE(), pg3, pg31); + pg02 = svand_z(SV_TRUE(), pg0, pg02); + pg12 = svand_z(SV_TRUE(), pg1, pg12); + pg22 = svand_z(SV_TRUE(), pg2, pg22); + pg32 = svand_z(SV_TRUE(), pg3, pg32); + + SV_TYPE x0_vec = svld1_vnum(pg0, x + i, 0); + SV_TYPE x1_vec = svld1_vnum(pg1, x + i, 1); + SV_TYPE x2_vec = svld1_vnum(pg2, x + i, 2); + SV_TYPE x3_vec = svld1_vnum(pg3, x + i, 3); + + SV_TYPE a00_vec = svld1_vnum(pg00, a0_ptr + i, 0); + SV_TYPE a10_vec = svld1_vnum(pg10, a0_ptr + i, 1); + SV_TYPE a20_vec = svld1_vnum(pg20, a0_ptr + i, 2); + SV_TYPE a30_vec = svld1_vnum(pg30, a0_ptr + i, 3); + SV_TYPE a01_vec = svld1_vnum(pg01, a1_ptr + i, 0); + SV_TYPE a11_vec = svld1_vnum(pg11, a1_ptr + i, 1); + SV_TYPE a21_vec = svld1_vnum(pg21, a1_ptr + i, 2); + SV_TYPE a31_vec = svld1_vnum(pg31, a1_ptr + i, 3); + SV_TYPE a02_vec = svld1_vnum(pg02, a2_ptr + i, 0); + SV_TYPE a12_vec = svld1_vnum(pg12, a2_ptr + i, 1); + SV_TYPE a22_vec = svld1_vnum(pg22, a2_ptr + i, 2); + SV_TYPE a32_vec = svld1_vnum(pg32, a2_ptr + i, 3); + + temp00_vec = svmla_m(pg00, temp00_vec, a00_vec, x0_vec); + temp10_vec = svmla_m(pg10, temp10_vec, a10_vec, x1_vec); + temp20_vec = svmla_m(pg20, temp20_vec, a20_vec, x2_vec); + temp30_vec = svmla_m(pg30, temp30_vec, a30_vec, x3_vec); + temp01_vec = svmla_m(pg01, temp01_vec, a01_vec, x0_vec); + temp11_vec = svmla_m(pg11, temp11_vec, a11_vec, 
x1_vec); + temp21_vec = svmla_m(pg21, temp21_vec, a21_vec, x2_vec); + temp31_vec = svmla_m(pg31, temp31_vec, a31_vec, x3_vec); + temp02_vec = svmla_m(pg02, temp02_vec, a02_vec, x0_vec); + temp12_vec = svmla_m(pg12, temp12_vec, a12_vec, x1_vec); + temp22_vec = svmla_m(pg22, temp22_vec, a22_vec, x2_vec); + temp32_vec = svmla_m(pg32, temp32_vec, a32_vec, x3_vec); + } + + temp00_vec = svadd_x(SV_TRUE(), temp00_vec, temp10_vec); + temp01_vec = svadd_x(SV_TRUE(), temp01_vec, temp11_vec); + temp02_vec = svadd_x(SV_TRUE(), temp02_vec, temp12_vec); + temp20_vec = svadd_x(SV_TRUE(), temp20_vec, temp30_vec); + temp21_vec = svadd_x(SV_TRUE(), temp21_vec, temp31_vec); + temp22_vec = svadd_x(SV_TRUE(), temp22_vec, temp32_vec); + temp00_vec = svadd_x(SV_TRUE(), temp00_vec, temp20_vec); + temp01_vec = svadd_x(SV_TRUE(), temp01_vec, temp21_vec); + temp02_vec = svadd_x(SV_TRUE(), temp02_vec, temp22_vec); + + if ((j + width * 0) < n) { + temp = svaddv(SV_TRUE(), temp00_vec); + y0_ptr[iy] += alpha * temp; + } + if ((j + width * 1) < n) { + temp = svaddv(SV_TRUE(), temp01_vec); + y1_ptr[iy] += alpha * temp; + } + if ((j + width * 2) < n) { + temp = svaddv(SV_TRUE(), temp02_vec); + y2_ptr[iy] += alpha * temp; + } + iy += inc_y; + + a0_ptr += lda; + a1_ptr += lda; + a2_ptr += lda; + } + + return(0); + } + + a_ptr = a; + for (j = 0; j < n; j++) { + temp = 0.0; + ix = 0; + for (i = 0; i < m; i++) { + temp += a_ptr[i] * x[ix]; + ix += inc_x; + } + y[iy] += alpha * temp; + iy += inc_y; + a_ptr += lda; + } + return(0); +} diff --git a/kernel/arm64/rot.c b/kernel/arm64/rot.c new file mode 100644 index 0000000000..09b708494c --- /dev/null +++ b/kernel/arm64/rot.c @@ -0,0 +1,40 @@ +/******************************************************************************* +Copyright (c) 2015, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*******************************************************************************/ +#include "common.h" +#include "rot_kernel_sve.c" +#include "rot_kernel_c.c" + +int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s) +{ + if (n <= 0) + return (0); + if (inc_x == 1 && inc_y == 1) + rot_kernel_sve(n, x, y, c, s); + else + rot_kernel_c(n, x, inc_x, y, inc_y, c, s); + return (0); +} \ No newline at end of file diff --git a/kernel/arm64/rot_kernel_c.c b/kernel/arm64/rot_kernel_c.c new file mode 100644 index 0000000000..788beed7a5 --- /dev/null +++ b/kernel/arm64/rot_kernel_c.c @@ -0,0 +1,44 @@ +/******************************************************************************* +Copyright (c) 2015, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*******************************************************************************/ +#include "common.h" + +static int rot_kernel_c(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s) +{ + BLASLONG i = 0; + BLASLONG ix = 0, iy = 0; + FLOAT temp; + while (i < n) + { + temp = c * x[ix] + s * y[iy]; + y[iy] = c * y[iy] - s * x[ix]; + x[ix] = temp; + ix += inc_x; + iy += inc_y; + i++; + } + return (0); +} \ No newline at end of file diff --git a/kernel/arm64/rot_kernel_sve.c b/kernel/arm64/rot_kernel_sve.c new file mode 100644 index 0000000000..1d54a2907e --- /dev/null +++ b/kernel/arm64/rot_kernel_sve.c @@ -0,0 +1,59 @@ +/******************************************************************************* +Copyright (c) 2015, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*******************************************************************************/ +#include "common.h" +#include + +#ifdef DOUBLE +#define SVE_TYPE svfloat64_t +#define SVE_ZERO svdup_f64(0.0) +#define SVE_WHILELT svwhilelt_b64 +#define SVE_ALL svptrue_b64() +#define SVE_WIDTH svcntd() +#else +#define SVE_TYPE svfloat32_t +#define SVE_ZERO svdup_f32(0.0) +#define SVE_WHILELT svwhilelt_b32 +#define SVE_ALL svptrue_b32() +#define SVE_WIDTH svcntw() +#endif + +static int rot_kernel_sve(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s) +{ + for (BLASLONG i = 0; i < n; i += SVE_WIDTH) + { + svbool_t pg = SVE_WHILELT((uint64_t)i, (uint64_t)n); + SVE_TYPE x_vec = svld1(pg, &x[i]); + SVE_TYPE y_vec = svld1(pg, &y[i]); + SVE_TYPE cx_vec = svmul_z(pg, x_vec, c); + SVE_TYPE sy_vec = svmul_z(pg, y_vec, s); + SVE_TYPE sx_vec = svmul_z(pg, x_vec, s); + SVE_TYPE cy_vec = svmul_z(pg, y_vec, c); + svst1(pg, &x[i], svadd_z(pg, cx_vec, sy_vec)); + svst1(pg, &y[i], svsub_z(pg, cy_vec, sx_vec)); + } + return (0); +} \ No newline at end of file diff --git a/kernel/arm64/sasum_thunderx2t99.c b/kernel/arm64/sasum_thunderx2t99.c index 014c667bac..2db1e69e74 100644 --- a/kernel/arm64/sasum_thunderx2t99.c +++ 
b/kernel/arm64/sasum_thunderx2t99.c @@ -153,8 +153,9 @@ static FLOAT sasum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) " asr "J", "N", #6 \n" " cmp "J", xzr \n" " beq 3f //asum_kernel_F1 \n" - +#if !(defined(__clang__) && defined(OS_WINDOWS)) ".align 5 \n" +#endif "2: //asum_kernel_F64: \n" " "KERNEL_F64" \n" " subs "J", "J", #1 \n" diff --git a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c index 1146409504..c874af4005 100644 --- a/kernel/arm64/sgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c @@ -222,7 +222,7 @@ CNAME(BLASLONG M, const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; - const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size2 && N >= 8 ? 1 : 0; FLOAT* packed_a = (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c index 731c9861b8..b29e3e46b5 100644 --- a/kernel/arm64/sgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c @@ -223,7 +223,7 @@ CNAME(BLASLONG M, const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; - const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size2 && N >= 8 ? 1 : 0; FLOAT* packed_a = (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; diff --git a/kernel/arm64/swap.c b/kernel/arm64/swap.c new file mode 100644 index 0000000000..6a9117cf0e --- /dev/null +++ b/kernel/arm64/swap.c @@ -0,0 +1,40 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ +#include "common.h" +#include "swap_kernel_sve.c" +#include "swap_kernel_c.c" + +int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) +{ + if (n <= 0) + return 0; + if (inc_x == 1 && inc_y == 1) + swap_kernel_sve(n, x, y); + else + swap_kernel_c(n, x, inc_x, y, inc_y); + return (0); +} \ No newline at end of file diff --git a/kernel/arm64/swap_kernel_c.c b/kernel/arm64/swap_kernel_c.c new file mode 100644 index 0000000000..4029350962 --- /dev/null +++ b/kernel/arm64/swap_kernel_c.c @@ -0,0 +1,46 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. 
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ +#include "common.h" +#include + +static int swap_kernel_c(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +{ + BLASLONG i = 0; + BLASLONG ix = 0, iy = 0; + FLOAT temp; + + while (i < n) + { + temp = x[ix]; + x[ix] = y[iy]; + y[iy] = temp; + ix += inc_x; + iy += inc_y; + i++; + } + return (0); +} \ No newline at end of file diff --git a/kernel/arm64/swap_kernel_sve.c b/kernel/arm64/swap_kernel_sve.c new file mode 100644 index 0000000000..1efdce48bd --- /dev/null +++ b/kernel/arm64/swap_kernel_sve.c @@ -0,0 +1,62 @@ +/******************************************************************************* +Copyright (c) 2015, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*******************************************************************************/ +#include "common.h" +#include + +#ifdef DOUBLE +#define SVE_TYPE svfloat64_t +#define SVE_ZERO svdup_f64(0.0) +#define SVE_WHILELT svwhilelt_b64 +#define SVE_ALL svptrue_b64() +#define SVE_WIDTH svcntd() +#else +#define SVE_TYPE svfloat32_t +#define SVE_ZERO svdup_f32(0.0) +#define SVE_WHILELT svwhilelt_b32 +#define SVE_ALL svptrue_b32() +#define SVE_WIDTH svcntw() +#endif + +static int swap_kernel_sve(BLASLONG n, FLOAT *x, FLOAT *y) +{ + BLASLONG sve_width = SVE_WIDTH; + + for (BLASLONG i = 0; i < n; i += sve_width * 2) + { + svbool_t pg_a = SVE_WHILELT((uint64_t)i, (uint64_t)n); + svbool_t pg_b = SVE_WHILELT((uint64_t)(i + sve_width), (uint64_t)n); + SVE_TYPE x_vec_a = svld1(pg_a, &x[i]); + SVE_TYPE y_vec_a = svld1(pg_a, &y[i]); + SVE_TYPE x_vec_b = svld1(pg_b, &x[i + sve_width]); + SVE_TYPE y_vec_b = svld1(pg_b, &y[i + sve_width]); + svst1(pg_a, &x[i], y_vec_a); + svst1(pg_a, &y[i], x_vec_a); + svst1(pg_b, &x[i + sve_width], y_vec_b); + svst1(pg_b, &y[i + sve_width], x_vec_b); + } + return (0); +} diff --git a/kernel/arm64/zasum_thunderx2t99.c b/kernel/arm64/zasum_thunderx2t99.c index 1d303a9a30..4813574002 100644 --- a/kernel/arm64/zasum_thunderx2t99.c +++ b/kernel/arm64/zasum_thunderx2t99.c @@ -153,8 +153,9 @@ static FLOAT zasum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x) " asr "J", "N", #4 \n" " cmp "J", xzr \n" " beq 3f //asum_kernel_F1 \n" - +#if 
!(defined(__clang__) && defined(OS_WINDOWS)) ".align 5 \n" +#endif "2: //asum_kernel_F16: \n" " "KERNEL_F16" \n" " subs "J", "J", #1 \n" diff --git a/kernel/csky/KERNEL b/kernel/csky/KERNEL index afa8a08817..0302057a2a 100644 --- a/kernel/csky/KERNEL +++ b/kernel/csky/KERNEL @@ -146,4 +146,14 @@ DGEMM_BETA = ../generic/gemm_beta.c CGEMM_BETA = ../generic/zgemm_beta.c ZGEMM_BETA = ../generic/zgemm_beta.c +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/e2k/KERNEL b/kernel/e2k/KERNEL index afa8a08817..0302057a2a 100644 --- a/kernel/e2k/KERNEL +++ b/kernel/e2k/KERNEL @@ -146,4 +146,14 @@ DGEMM_BETA = ../generic/gemm_beta.c CGEMM_BETA = ../generic/zgemm_beta.c ZGEMM_BETA = ../generic/zgemm_beta.c +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/generic/rotm.c b/kernel/generic/rotm.c new file mode 100644 index 0000000000..e151aa5f88 --- /dev/null +++ b/kernel/generic/rotm.c @@ -0,0 +1,159 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. 
Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +int CNAME(BLASLONG n, FLOAT *dx, BLASLONG incx, FLOAT *dy, BLASLONG incy, FLOAT *dparam) +{ + BLASLONG i__1, i__2; + BLASLONG i__; + FLOAT w, z__; + BLASLONG kx, ky; + FLOAT dh11, dh12, dh22, dh21, dflag; + BLASLONG nsteps; + + --dparam; + --dy; + --dx; + + dflag = dparam[1]; + if (n <= 0 || dflag == - 2.0) goto L140; + + if (! (incx == incy && incx > 0)) goto L70; + + nsteps = n * incx; + if (dflag < 0.) { + goto L50; + } else if (dflag == 0) { + goto L10; + } else { + goto L30; + } +L10: + dh12 = dparam[4]; + dh21 = dparam[3]; + i__1 = nsteps; + i__2 = incx; + for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { + w = dx[i__]; + z__ = dy[i__]; + dx[i__] = w + z__ * dh12; + dy[i__] = w * dh21 + z__; +/* L20: */ + } + goto L140; +L30: + dh11 = dparam[2]; + dh22 = dparam[5]; + i__2 = nsteps; + i__1 = incx; + for (i__ = 1; i__1 < 0 ? 
i__ >= i__2 : i__ <= i__2; i__ += i__1) { + w = dx[i__]; + z__ = dy[i__]; + dx[i__] = w * dh11 + z__; + dy[i__] = -w + dh22 * z__; +/* L40: */ + } + goto L140; +L50: + dh11 = dparam[2]; + dh12 = dparam[4]; + dh21 = dparam[3]; + dh22 = dparam[5]; + i__1 = nsteps; + i__2 = incx; + for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { + w = dx[i__]; + z__ = dy[i__]; + dx[i__] = w * dh11 + z__ * dh12; + dy[i__] = w * dh21 + z__ * dh22; +/* L60: */ + } + goto L140; +L70: + kx = 1; + ky = 1; + if (incx < 0) { + kx = (1 - n) * incx + 1; + } + if (incy < 0) { + ky = (1 - n) * incy + 1; + } + + if (dflag < 0.) { + goto L120; + } else if (dflag == 0) { + goto L80; + } else { + goto L100; + } +L80: + dh12 = dparam[4]; + dh21 = dparam[3]; + i__2 = n; + for (i__ = 1; i__ <= i__2; ++i__) { + w = dx[kx]; + z__ = dy[ky]; + dx[kx] = w + z__ * dh12; + dy[ky] = w * dh21 + z__; + kx += incx; + ky += incy; +/* L90: */ + } + goto L140; +L100: + dh11 = dparam[2]; + dh22 = dparam[5]; + i__2 = n; + for (i__ = 1; i__ <= i__2; ++i__) { + w = dx[kx]; + z__ = dy[ky]; + dx[kx] = w * dh11 + z__; + dy[ky] = -w + dh22 * z__; + kx += incx; + ky += incy; +/* L110: */ + } + goto L140; +L120: + dh11 = dparam[2]; + dh12 = dparam[4]; + dh21 = dparam[3]; + dh22 = dparam[5]; + i__2 = n; + for (i__ = 1; i__ <= i__2; ++i__) { + w = dx[kx]; + z__ = dy[ky]; + dx[kx] = w * dh11 + z__ * dh12; + dy[ky] = w * dh21 + z__ * dh22; + kx += incx; + ky += incy; +/* L130: */ + } +L140: + return(0); +} diff --git a/kernel/generic/zgemm_beta.c b/kernel/generic/zgemm_beta.c index 7954e22e3c..61dd207d0c 100644 --- a/kernel/generic/zgemm_beta.c +++ b/kernel/generic/zgemm_beta.c @@ -58,8 +58,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, c_offset = c; if (beta_r == 0. && beta_i == 0.) 
{ - j = n; - do { + + for (j=n;j>0;j--) { c_offset1 = c_offset; c_offset += ldc; @@ -88,13 +88,12 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, i--; } while (i > 0); } - j --; - } while (j > 0); + } } else { - j = n; - do { + + for (j=n;j>0;j--) { c_offset1 = c_offset; c_offset += ldc; @@ -151,8 +150,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, i --; } while (i > 0); } - j --; - } while (j > 0); + } } return 0; } diff --git a/kernel/ia64/KERNEL b/kernel/ia64/KERNEL index 870aac473e..bbfec7d556 100644 --- a/kernel/ia64/KERNEL +++ b/kernel/ia64/KERNEL @@ -142,3 +142,15 @@ ZTRSMKERNEL_RT = ztrsm_kernel_RT.S CGEMM3MKERNEL = zgemm3m_kernel.S ZGEMM3MKERNEL = zgemm3m_kernel.S + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/loongarch64/KERNEL b/kernel/loongarch64/KERNEL index e5d145a718..46d8daaa96 100644 --- a/kernel/loongarch64/KERNEL +++ b/kernel/loongarch64/KERNEL @@ -236,3 +236,15 @@ ZGEMM3MKERNEL = zgemm3m_kernel.S endif DSDOTKERNEL = dot.S + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/loongarch64/KERNEL.generic b/kernel/loongarch64/KERNEL.generic index 213add9ee5..b2e4cb44ad 100644 --- a/kernel/loongarch64/KERNEL.generic +++ b/kernel/loongarch64/KERNEL.generic @@ -169,3 +169,15 @@ QCABS_KERNEL = ../generic/cabs.c #Dump kernel CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/loongarch64/dsymv_L_lasx.S b/kernel/loongarch64/dsymv_L_lasx.S index 2259966d86..5082322283 100644 --- 
a/kernel/loongarch64/dsymv_L_lasx.S +++ b/kernel/loongarch64/dsymv_L_lasx.S @@ -28,6 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ASSEMBLER #include "common.h" +#include "loongarch64_asm.S" /* Param */ #define M $r4 @@ -57,6 +58,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define T2 $r28 #define T3 $r29 #define T4 $r30 +#define T5 $r17 +#define T6 $r16 /* LSX vectors */ #define U0 $xr31 @@ -87,10 +90,113 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define a8 $f8 #define a9 $f9 +.macro LOAD_Y_8 + beqz T5, .L01_Y_0 + add.d T2, IY, INCY + fldx.d $f4, Y, T2 + add.d T2, T2, INCY + fldx.d $f5, Y, T2 + add.d T2, T2, INCY + fldx.d $f6, Y, T2 + add.d T2, T2, INCY + fldx.d $f7, Y, T2 - PROLOGUE + add.d T2, T2, INCY + fldx.d $f8, Y, T2 + add.d T2, T2, INCY + fldx.d $f9, Y, T2 + add.d T2, T2, INCY + fldx.d $f10, Y, T2 + add.d T2, T2, INCY + fldx.d $f11, Y, T2 + + vextrins.d $vr4, $vr5, 0x10 + vextrins.d $vr6, $vr7, 0x10 + xvpermi.q U4, U6, 0x02 - LDARG BUFFER, $sp, 0 + vextrins.d $vr8, $vr9, 0x10 + vextrins.d $vr10, $vr11, 0x10 + xvpermi.q U8, U10, 0x02 + b .L01_Y_1 +.L01_Y_0: + add.d T3, IY, INCY + xvldx U4, Y, T3 + alsl.d T4, INCY, T3, 2 + xvldx U8, Y, T4 +.L01_Y_1: +.endm + +.macro LOAD_X_8 + beqz T6, .L01_X_0 + add.d T2, IX, INCX + fldx.d $f4, X, T2 + add.d T2, T2, INCX + fldx.d $f5, X, T2 + add.d T2, T2, INCX + fldx.d $f6, X, T2 + add.d T2, T2, INCX + fldx.d $f7, X, T2 + + add.d T2, T2, INCX + fldx.d $f8, X, T2 + add.d T2, T2, INCX + fldx.d $f9, X, T2 + add.d T2, T2, INCX + fldx.d $f10, X, T2 + add.d T2, T2, INCX + fldx.d $f11, X, T2 + + vextrins.d $vr4, $vr5, 0x10 + vextrins.d $vr6, $vr7, 0x10 + xvpermi.q U4, U6, 0x02 + + vextrins.d $vr8, $vr9, 0x10 + vextrins.d $vr10, $vr11, 0x10 + xvpermi.q U8, U10, 0x02 + b .L01_X_1 +.L01_X_0: + add.d T3, IX, INCX + xvldx U4, X, T3 + alsl.d T2, INCX, T3, 2 + xvldx U8, X, T2 +.L01_X_1: +.endm + +.macro STORE_Y_8 + beqz T5, 
.L01_Y_2 + xvpermi.d U6, U4, 0xee + vextrins.d $vr5, $vr4, 0x01 + vextrins.d $vr7, $vr6, 0x01 + + xvpermi.d U10, U8, 0xee + vextrins.d $vr9, $vr8, 0x01 + vextrins.d $vr11, $vr10, 0x01 + + add.d T2, IY, INCY + fstx.d $f4, Y, T2 + add.d T2, T2, INCY + fstx.d $f5, Y, T2 + add.d T2, T2, INCY + fstx.d $f6, Y, T2 + add.d T2, T2, INCY + fstx.d $f7, Y, T2 + + add.d T2, T2, INCY + fstx.d $f8, Y, T2 + add.d T2, T2, INCY + fstx.d $f9, Y, T2 + add.d T2, T2, INCY + fstx.d $f10, Y, T2 + add.d T2, T2, INCY + fstx.d $f11, Y, T2 + b .L01_Y_3 +.L01_Y_2: + xvstx U4, Y, T3 + xvstx U8, Y, T4 +.L01_Y_3: +.endm + + PROLOGUE addi.d $sp, $sp, -88 @@ -107,6 +213,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. xvldrepl.d VALPHA, $sp, 80 + addi.d T5, INCY, -1 + addi.d T6, INCX, -1 slli.d LDA, LDA, BASE_SHIFT slli.d INCX, INCX, BASE_SHIFT slli.d INCY, INCY, BASE_SHIFT @@ -122,11 +230,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. beq J, N, .L999 .L01: - MTC a2, $r0 //temp2 + xvxor.v U2, U2, U2 fldx.d a6, X, JX fmul.d a3, ALPHA, a6 //temp1 xvreplve0.d U3, U3 - xvreplve0.d U2, U2 mul.d T0, J, LDA slli.d T1, J, BASE_SHIFT @@ -147,126 +254,41 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
srai.d T0, T0, 3 add.d T0, T0, J addi.d T0, T0, 1 - beq I, T0, .L03 - bge I, T0, .L03 + beq I, T0, .L03 + bge I, T0, .L03 mul.d T1, J, LDA add.d T1, T1, II .L02: /* /8 */ xvldx U1, AO1, T1 - addi.d T1, T1, 32 - xvldx U14, AO1, T1 - addi.d T1, T1, 32 + addi.d T2, T1, 32 + xvldx U14, AO1, T2 - add.d T2, IY, INCY - fldx.d $f4, Y, T2 - add.d T2, T2, INCY - fldx.d $f5, Y, T2 - add.d T2, T2, INCY - fldx.d $f6, Y, T2 - add.d T2, T2, INCY - fldx.d $f7, Y, T2 - - add.d T2, T2, INCY - fldx.d $f8, Y, T2 - add.d T2, T2, INCY - fldx.d $f9, Y, T2 - add.d T2, T2, INCY - fldx.d $f10, Y, T2 - add.d T2, T2, INCY - fldx.d $f11, Y, T2 - - vextrins.d $vr4, $vr5, 0x10 - vextrins.d $vr6, $vr7, 0x10 - xvpermi.q U4, U6, 0x02 - - vextrins.d $vr8, $vr9, 0x10 - vextrins.d $vr10, $vr11, 0x10 - xvpermi.q U8, U10, 0x02 + LOAD_Y_8 xvfmadd.d U4, U3, U1, U4 xvfmadd.d U8, U3, U14, U8 - xvpermi.d U6, U4, 0xee - vextrins.d $vr5, $vr4, 0x01 - vextrins.d $vr7, $vr6, 0x01 - - xvpermi.d U10, U8, 0xee - vextrins.d $vr9, $vr8, 0x01 - vextrins.d $vr11, $vr10, 0x01 - - add.d T2, IY, INCY - fstx.d $f4, Y, T2 - add.d T2, T2, INCY - fstx.d $f5, Y, T2 - add.d T2, T2, INCY - fstx.d $f6, Y, T2 - add.d T2, T2, INCY - fstx.d $f7, Y, T2 - - add.d T2, T2, INCY - fstx.d $f8, Y, T2 - add.d T2, T2, INCY - fstx.d $f9, Y, T2 - add.d T2, T2, INCY - fstx.d $f10, Y, T2 - add.d T2, T2, INCY - fstx.d $f11, Y, T2 - - slli.d T2, INCY, 3 - add.d IY, IY, T2 - - add.d T2, IX, INCX - fldx.d $f4, X, T2 - add.d T2, T2, INCX - fldx.d $f5, X, T2 - add.d T2, T2, INCX - fldx.d $f6, X, T2 - add.d T2, T2, INCX - fldx.d $f7, X, T2 - - add.d T2, T2, INCX - fldx.d $f8, X, T2 - add.d T2, T2, INCX - fldx.d $f9, X, T2 - add.d T2, T2, INCX - fldx.d $f10, X, T2 - add.d T2, T2, INCX - fldx.d $f11, X, T2 - - vextrins.d $vr4, $vr5, 0x10 - vextrins.d $vr6, $vr7, 0x10 - xvpermi.q U4, U6, 0x02 - - vextrins.d $vr8, $vr9, 0x10 - vextrins.d $vr10, $vr11, 0x10 - xvpermi.q U8, U10, 0x02 - - xvand.v $xr12, $xr2, $xr2 - - xvfmadd.d U2, U1, U4, U2 - xvfsub.d U2, 
U2, $xr12 - xvfmadd.d U2, U14, U8, U2 + STORE_Y_8 - xvpermi.d U4, U2, 0x01 - xvpermi.d U5, U2, 0x02 - xvpermi.d U6, U2, 0x03 + alsl.d IY, INCY, IY, 3 - fadd.d $f2, $f2, $f4 - fadd.d $f2, $f2, $f5 - fadd.d $f2, $f2, $f6 - fadd.d $f2, $f2, $f12 + LOAD_X_8 - xvreplve0.d U2, U2 + xvfmadd.d U2, U1, U4, U2 + xvfmadd.d U2, U14, U8, U2 - slli.d T2, INCX, 3 - add.d IX, IX, T2 + alsl.d IX, INCX, IX, 3 + addi.d T1, T1, 64 addi.d II, II, 64 addi.d I, I, 1 blt I, T0, .L02 + //Acc U2 + GACC xvf, d, U4, U2 + xvreplve0.d U2, U4 .L03: /* &4 */ sub.d T0, M, J addi.d T0, T0, -1 @@ -437,4 +459,4 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi.d $sp, $sp, 88 jirl $r0, $r1, 0x0 - EPILOGUE \ No newline at end of file + EPILOGUE diff --git a/kernel/loongarch64/dsymv_L_lsx.S b/kernel/loongarch64/dsymv_L_lsx.S index 1fd0d26f58..fed4081089 100644 --- a/kernel/loongarch64/dsymv_L_lsx.S +++ b/kernel/loongarch64/dsymv_L_lsx.S @@ -28,6 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ASSEMBLER #include "common.h" +#include "loongarch64_asm.S" /* Param */ #define M $r4 @@ -57,6 +58,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define T2 $r28 #define T3 $r29 #define T4 $r30 +#define T5 $r17 +#define T6 $r16 +#define T7 $r12 /* LSX vectors */ #define U0 $vr31 @@ -87,10 +91,114 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define a8 $f8 #define a9 $f9 +.macro LOAD_Y_8 + beqz T5, .L01_Y_0 + add.d T2, IY, INCY + fldx.d $f4, Y, T2 + add.d T2, T2, INCY + fldx.d $f5, Y, T2 + add.d T2, T2, INCY + fldx.d $f6, Y, T2 + add.d T2, T2, INCY + fldx.d $f7, Y, T2 - PROLOGUE + add.d T2, T2, INCY + fldx.d $f8, Y, T2 + add.d T2, T2, INCY + fldx.d $f9, Y, T2 + add.d T2, T2, INCY + fldx.d $f10, Y, T2 + add.d T2, T2, INCY + fldx.d $f11, Y, T2 + + vextrins.d U4, U5, 0x10 + vextrins.d U6, U7, 0x10 + vextrins.d U8, U9, 0x10 + vextrins.d U10, U11, 0x10 + b .L01_Y_1 +.L01_Y_0: + add.d T7, IY, INCY + vldx U4, Y, T7 + alsl.d T2, INCY, T7, 1 + vldx U6, Y, T2 + alsl.d T3, INCY, T2, 1 + vldx U8, Y, T3 + alsl.d T4, INCY, T3, 1 + vldx U10, Y, T4 +.L01_Y_1: +.endm + +.macro LOAD_X_8 + beqz T6, .L01_X_0 + add.d T2, IX, INCX + fldx.d $f4, X, T2 + add.d T2, T2, INCX + fldx.d $f5, X, T2 + add.d T2, T2, INCX + fldx.d $f6, X, T2 + add.d T2, T2, INCX + fldx.d $f7, X, T2 + + add.d T2, T2, INCX + fldx.d $f8, X, T2 + add.d T2, T2, INCX + fldx.d $f9, X, T2 + add.d T2, T2, INCX + fldx.d $f10, X, T2 + add.d T2, T2, INCX + fldx.d $f11, X, T2 + + vextrins.d U4, U5, 0x10 + vextrins.d U6, U7, 0x10 + vextrins.d U8, U9, 0x10 + vextrins.d U10, U11, 0x10 + b .L01_X_1 +.L01_X_0: + add.d T7, IX, INCX + vldx U4, X, T7 + alsl.d T2, INCX, T7, 1 + vldx U6, X, T2 + alsl.d T3, INCX, T2, 1 + vldx U8, X, T3 + alsl.d T4, INCX, T3, 1 + vldx U10, X, T4 +.L01_X_1: +.endm + +.macro STORE_Y_8 + beqz T5, .L01_Y_2 + vextrins.d U5, U4, 0x01 + vextrins.d U7, U6, 0x01 + vextrins.d U9, U8, 0x01 + vextrins.d U11, U10, 0x01 + + add.d T2, IY, INCY + fstx.d $f4, Y, T2 + add.d T2, T2, INCY + fstx.d $f5, Y, T2 + add.d T2, T2, INCY + fstx.d $f6, Y, T2 + add.d T2, T2, INCY + fstx.d $f7, Y, T2 + + add.d T2, T2, INCY + fstx.d $f8, Y, T2 + add.d T2, T2, INCY + fstx.d $f9, Y, T2 + add.d T2, T2, INCY + fstx.d $f10, Y, T2 + add.d T2, T2, INCY + fstx.d $f11, Y, T2 + b .L01_Y_3 +.L01_Y_2: + vstx U4, Y, T7 + vstx U6, Y, T2 + vstx U8, Y, T3 + vstx U10, Y, T4 +.L01_Y_3: +.endm 
- LDARG BUFFER, $sp, 0 + PROLOGUE addi.d $sp, $sp, -88 @@ -107,6 +215,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vldrepl.d VALPHA, $sp, 80 + addi.d T5, INCY, -1 + addi.d T6, INCX, -1 slli.d LDA, LDA, BASE_SHIFT slli.d INCX, INCX, BASE_SHIFT slli.d INCY, INCY, BASE_SHIFT @@ -122,11 +232,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. beq J, N, .L999 .L01: - MTC a2, $r0 //temp2 + vxor.v U2, U2, U2 fldx.d a6, X, JX fmul.d a3, ALPHA, a6 //temp1 vshuf4i.d U3, U3, 0x00 - vshuf4i.d U2, U2, 0x00 mul.d T0, J, LDA slli.d T1, J, BASE_SHIFT @@ -163,105 +272,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vldx U16, AO1, T1 addi.d T1, T1, 16 - add.d T2, IY, INCY - fldx.d $f4, Y, T2 - add.d T2, T2, INCY - fldx.d $f5, Y, T2 - add.d T2, T2, INCY - fldx.d $f6, Y, T2 - add.d T2, T2, INCY - fldx.d $f7, Y, T2 - - add.d T2, T2, INCY - fldx.d $f8, Y, T2 - add.d T2, T2, INCY - fldx.d $f9, Y, T2 - add.d T2, T2, INCY - fldx.d $f10, Y, T2 - add.d T2, T2, INCY - fldx.d $f11, Y, T2 - - vextrins.d U4, U5, 0x10 - vextrins.d U6, U7, 0x10 - vextrins.d U8, U9, 0x10 - vextrins.d U10, U11, 0x10 + LOAD_Y_8 vfmadd.d U4, U3, U1, U4 vfmadd.d U6, U3, U14, U6 vfmadd.d U8, U3, U15, U8 vfmadd.d U10, U3, U16, U10 - vextrins.d U5, U4, 0x01 - vextrins.d U7, U6, 0x01 - vextrins.d U9, U8, 0x01 - vextrins.d U11, U10, 0x01 - - add.d T2, IY, INCY - fstx.d $f4, Y, T2 - add.d T2, T2, INCY - fstx.d $f5, Y, T2 - add.d T2, T2, INCY - fstx.d $f6, Y, T2 - add.d T2, T2, INCY - fstx.d $f7, Y, T2 - - add.d T2, T2, INCY - fstx.d $f8, Y, T2 - add.d T2, T2, INCY - fstx.d $f9, Y, T2 - add.d T2, T2, INCY - fstx.d $f10, Y, T2 - add.d T2, T2, INCY - fstx.d $f11, Y, T2 - - slli.d T2, INCY, 3 - add.d IY, IY, T2 - - add.d T2, IX, INCX - fldx.d $f4, X, T2 - add.d T2, T2, INCX - fldx.d $f5, X, T2 - add.d T2, T2, INCX - fldx.d $f6, X, T2 - add.d T2, T2, INCX - fldx.d $f7, X, T2 - - add.d T2, T2, INCX - fldx.d $f8, X, T2 - add.d T2, T2, INCX - 
fldx.d $f9, X, T2 - add.d T2, T2, INCX - fldx.d $f10, X, T2 - add.d T2, T2, INCX - fldx.d $f11, X, T2 + STORE_Y_8 - vextrins.d U4, U5, 0x10 - vextrins.d U6, U7, 0x10 - vextrins.d U8, U9, 0x10 - vextrins.d U10, U11, 0x10 + alsl.d IY, INCY, IY, 3 - vand.v $vr12, $vr2, $vr2 + LOAD_X_8 vfmadd.d U2, U1, U4, U2 - vfsub.d U2, U2, $vr12 vfmadd.d U2, U14, U6, U2 vfmadd.d U2, U15, U8, U2 vfmadd.d U2, U16, U10, U2 - vextrins.d U4, U2, 0x01 - - fadd.d $f2, $f2, $f4 - fadd.d $f2, $f2, $f12 - - vextrins.d U2, U2, 0x10 - - slli.d T2, INCX, 3 - add.d IX, IX, T2 + alsl.d IX, INCX, IX, 3 addi.d II, II, 64 addi.d I, I, 1 blt I, T0, .L02 + // Acc U2 + GACC vf, d, U4, U2 + vilvl.d U2, U4, U4 + .L03: /* &4 */ sub.d T0, M, J addi.d T0, T0, -1 @@ -429,4 +467,4 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi.d $sp, $sp, 88 jirl $r0, $r1, 0x0 - EPILOGUE \ No newline at end of file + EPILOGUE diff --git a/kernel/loongarch64/dsymv_U_lasx.S b/kernel/loongarch64/dsymv_U_lasx.S index 57eb90aaef..21bf3dffcf 100644 --- a/kernel/loongarch64/dsymv_U_lasx.S +++ b/kernel/loongarch64/dsymv_U_lasx.S @@ -28,6 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ASSEMBLER #include "common.h" +#include "loongarch64_asm.S" /* Param */ #define M $r4 @@ -57,6 +58,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define T2 $r28 #define T3 $r29 #define T4 $r30 +#define T5 $r17 +#define T6 $r16 /* LSX vectors */ #define U0 $xr31 @@ -87,67 +90,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define a8 $f8 #define a9 $f9 - - PROLOGUE - - LDARG BUFFER, $sp, 0 - - addi.d $sp, $sp, -88 - - SDARG $r23, $sp, 0 - SDARG $r24, $sp, 8 - SDARG $r25, $sp, 16 - SDARG $r26, $sp, 32 - SDARG $r27, $sp, 40 - SDARG $r28, $sp, 48 - SDARG $r29, $sp, 56 - SDARG $r30, $sp, 64 - SDARG $r31, $sp, 72 - ST ALPHA, $sp, 80 - - xvldrepl.d VALPHA, $sp, 80 - - slli.d LDA, LDA, BASE_SHIFT - slli.d INCX, INCX, BASE_SHIFT - slli.d INCY, INCY, BASE_SHIFT - - bge $r0, M, .L999 - bge $r0, N, .L999 - - sub.d M1, M, N - - mul.d JY, M1, INCY - mul.d JX, M1, INCX - - move J, M1 - move AO1, A - - beq J, M, .L999 - -.L01: - MTC $f2, $r0 //temp2 - fldx.d $f6, X, JX - fmul.d $f3, ALPHA, $f6 //temp1 - xvreplve0.d U3, U3 - xvreplve0.d U2, U2 - - move IY, $r0 - move IX, $r0 - move II, $r0 - move I, $r0 - - srai.d T0, J, 3 - beq I, T0, .L03 - - mul.d T1, J, LDA - add.d T1, T1, II - -.L02: /* /8 */ - xvldx U1, AO1, T1 - addi.d T1, T1, 32 - xvldx U14, AO1, T1 - addi.d T1, T1, 32 - +.macro LOAD_Y_8 + beqz T5, .L01_Y_0 fldx.d $f4, Y, IY add.d T2, IY, INCY fldx.d $f5, Y, T2 @@ -167,20 +111,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vextrins.d $vr4, $vr5, 0x10 vextrins.d $vr6, $vr7, 0x10 - xvpermi.q U4, U6, 0x02 + xvpermi.q U4, U6, 0x02 vextrins.d $vr8, $vr9, 0x10 vextrins.d $vr10, $vr11, 0x10 - xvpermi.q U8, U10, 0x02 - - xvfmadd.d U4, U3, U1, U4 - xvfmadd.d U8, U3, U14, U8 - - xvpermi.d U6, U4, 0xee + xvpermi.q U8, U10, 0x02 + b .L01_Y_1 +.L01_Y_0: + xvldx U4, Y, IY + alsl.d T4, INCY, IY, 2 + xvldx U8, Y, T4 +.L01_Y_1: +.endm + +.macro STORE_Y_8 + beqz T5, .L01_Y_2 + xvpermi.d U6, U4, 0xee vextrins.d $vr5, $vr4, 0x01 vextrins.d $vr7, $vr6, 0x01 - xvpermi.d U10, U8, 0xee + xvpermi.d U10, U8, 0xee vextrins.d $vr9, $vr8, 0x01 vextrins.d $vr11, $vr10, 0x01 @@ -200,10 +150,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
fstx.d $f10, Y, T2 add.d T2, T2, INCY fstx.d $f11, Y, T2 - - slli.d T2, INCY, 3 - add.d IY, IY, T2 - + b .L01_Y_3 +.L01_Y_2: + xvstx U4, Y, IY + xvstx U8, Y, T4 +.L01_Y_3: +.endm + +.macro LOAD_X_8 + beqz T6, .L01_X_0 fldx.d $f4, X, IX add.d T2, IX, INCX fldx.d $f5, X, T2 @@ -223,36 +178,102 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vextrins.d $vr4, $vr5, 0x10 vextrins.d $vr6, $vr7, 0x10 - xvpermi.q U4, U6, 0x02 + xvpermi.q U4, U6, 0x02 vextrins.d $vr8, $vr9, 0x10 vextrins.d $vr10, $vr11, 0x10 - xvpermi.q U8, U10, 0x02 + xvpermi.q U8, U10, 0x02 + b .L01_X_1 +.L01_X_0: + xvldx U4, X, IX + alsl.d T2, INCX, IX, 2 + xvldx U8, X, T2 +.L01_X_1: +.endm - xvand.v $xr12, $xr2, $xr2 + PROLOGUE - xvfmadd.d U2, U1, U4, U2 - xvfsub.d U2, U2, $xr12 - xvfmadd.d U2, U14, U8, U2 + addi.d $sp, $sp, -88 - xvpermi.d U4, U2, 0x01 - xvpermi.d U5, U2, 0x02 - xvpermi.d U6, U2, 0x03 + SDARG $r23, $sp, 0 + SDARG $r24, $sp, 8 + SDARG $r25, $sp, 16 + SDARG $r26, $sp, 32 + SDARG $r27, $sp, 40 + SDARG $r28, $sp, 48 + SDARG $r29, $sp, 56 + SDARG $r30, $sp, 64 + SDARG $r31, $sp, 72 + ST ALPHA, $sp, 80 - fadd.d $f2, $f2, $f4 - fadd.d $f2, $f2, $f5 - fadd.d $f2, $f2, $f6 - fadd.d $f2, $f2, $f12 + xvldrepl.d VALPHA, $sp, 80 - xvreplve0.d U2, U2 + addi.d T5, INCY, -1 + addi.d T6, INCX, -1 + slli.d LDA, LDA, BASE_SHIFT + slli.d INCX, INCX, BASE_SHIFT + slli.d INCY, INCY, BASE_SHIFT - slli.d T2, INCX, 3 - add.d IX, IX, T2 + bge $r0, M, .L999 + bge $r0, N, .L999 + + sub.d M1, M, N + + mul.d JY, M1, INCY + mul.d JX, M1, INCX + + move J, M1 + move AO1, A + beq J, M, .L999 + +.L01: + xvxor.v U2, U2, U2 + fldx.d $f6, X, JX + fmul.d $f3, ALPHA, $f6 //temp1 + xvreplve0.d U3, U3 + + move IY, $r0 + move IX, $r0 + move II, $r0 + move I, $r0 + + srai.d T0, J, 3 + beq I, T0, .L03 + + mul.d T1, J, LDA + add.d T1, T1, II + +.L02: /* /8 */ + xvldx U1, AO1, T1 + addi.d T2, T1, 32 + xvldx U14, AO1, T2 + + LOAD_Y_8 + + xvfmadd.d U4, U3, U1, U4 + xvfmadd.d U8, U3, U14, U8 + + STORE_Y_8 + 
+ alsl.d IY, INCY, IY, 3 + + LOAD_X_8 + + xvfmadd.d U2, U1, U4, U2 + xvfmadd.d U2, U14, U8, U2 + + alsl.d IX, INCX, IX, 3 + + addi.d T1, T1, 64 addi.d II, II, 64 addi.d I, I, 1 blt I, T0, .L02 + //Acc U2 + GACC xvf, d, U4, U2 + xvreplve0.d U2, U4 + .L03: /* &4 */ andi T0, J, 4 beq $r0, T0, .L04 @@ -425,4 +446,4 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi.d $sp, $sp, 88 jirl $r0, $r1, 0x0 - EPILOGUE \ No newline at end of file + EPILOGUE diff --git a/kernel/loongarch64/dsymv_U_lsx.S b/kernel/loongarch64/dsymv_U_lsx.S index f708196aaa..2589f31910 100644 --- a/kernel/loongarch64/dsymv_U_lsx.S +++ b/kernel/loongarch64/dsymv_U_lsx.S @@ -28,6 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ASSEMBLER #include "common.h" +#include "loongarch64_asm.S" /* Param */ #define M $r4 @@ -57,6 +58,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define T2 $r28 #define T3 $r29 #define T4 $r30 +#define T5 $r17 +#define T6 $r16 +#define T7 $r12 /* LSX vectors */ #define U0 $vr31 @@ -87,10 +91,109 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define a8 $f8 #define a9 $f9 +.macro LOAD_Y_8 + beqz T5, .L01_Y_0 + fldx.d $f4, Y, IY + add.d T2, IY, INCY + fldx.d $f5, Y, T2 + add.d T2, T2, INCY + fldx.d $f6, Y, T2 + add.d T2, T2, INCY + fldx.d $f7, Y, T2 - PROLOGUE + add.d T2, T2, INCY + fldx.d $f8, Y, T2 + add.d T2, T2, INCY + fldx.d $f9, Y, T2 + add.d T2, T2, INCY + fldx.d $f10, Y, T2 + add.d T2, T2, INCY + fldx.d $f11, Y, T2 - LDARG BUFFER, $sp, 0 + vextrins.d U4, U5, 0x10 + vextrins.d U6, U7, 0x10 + vextrins.d U8, U9, 0x10 + vextrins.d U10, U11, 0x10 + b .L01_Y_1 +.L01_Y_0: + vldx U4, Y, IY + alsl.d T2, INCY, IY, 1 + vldx U6, Y, T2 + alsl.d T3, INCY, T2, 1 + vldx U8, Y, T3 + alsl.d T4, INCY, T3, 1 + vldx U10, Y, T4 +.L01_Y_1: +.endm + +.macro STORE_Y_8 + beqz T5, .L01_Y_2 + vextrins.d U5, U4, 0x01 + vextrins.d U7, U6, 0x01 + vextrins.d U9, U8, 0x01 + vextrins.d U11, U10, 0x01 + + fstx.d $f4, Y, IY + add.d T2, IY, INCY + fstx.d $f5, Y, T2 + add.d T2, T2, INCY + fstx.d $f6, Y, T2 + add.d T2, T2, INCY + fstx.d $f7, Y, T2 + + add.d T2, T2, INCY + fstx.d $f8, Y, T2 + add.d T2, T2, INCY + fstx.d $f9, Y, T2 + add.d T2, T2, INCY + fstx.d $f10, Y, T2 + add.d T2, T2, INCY + fstx.d $f11, Y, T2 + b .L01_Y_3 +.L01_Y_2: + vstx U4, Y, IY + vstx U6, Y, T2 + vstx U8, Y, T3 + vstx U10,Y, T4 +.L01_Y_3: +.endm + +.macro LOAD_X_8 + beqz T6, .L01_X_0 + fldx.d $f4, X, IX + add.d T2, IX, INCX + fldx.d $f5, X, T2 + add.d T2, T2, INCX + fldx.d $f6, X, T2 + add.d T2, T2, INCX + fldx.d $f7, X, T2 + + add.d T2, T2, INCX + fldx.d $f8, X, T2 + add.d T2, T2, INCX + fldx.d $f9, X, T2 + add.d T2, T2, INCX + fldx.d $f10, X, T2 + add.d T2, T2, INCX + fldx.d $f11, X, T2 + + vextrins.d U4, U5, 0x10 + vextrins.d U6, U7, 0x10 + vextrins.d U8, U9, 0x10 + vextrins.d U10, U11, 0x10 + b .L01_X_1 +.L01_X_0: + vldx U4, X, IX + alsl.d T2, INCX, IX, 1 + vldx U6, X, T2 + alsl.d T3, INCX, T2, 1 + vldx U8, X, T3 + alsl.d T4, INCX, T3, 1 + vldx U10, X, T4 +.L01_X_1: +.endm + + PROLOGUE addi.d $sp, $sp, -88 @@ -107,6 +210,8 @@ USE OF THIS SOFTWARE, EVEN IF 
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vldrepl.d VALPHA, $sp, 80 + addi.d T5, INCY, -1 + addi.d T6, INCX, -1 slli.d LDA, LDA, BASE_SHIFT slli.d INCX, INCX, BASE_SHIFT slli.d INCY, INCY, BASE_SHIFT @@ -125,11 +230,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. beq J, M, .L999 .L01: - MTC $f2, $r0 //temp2 + vxor.v U2, U2, U2 fldx.d $f6, X, JX fmul.d $f3, ALPHA, $f6 //temp1 vshuf4i.d U3, U3, 0x00 - vshuf4i.d U2, U2, 0x00 move IY, $r0 move IX, $r0 @@ -152,102 +256,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vldx U16, AO1, T1 addi.d T1, T1, 16 - fldx.d $f4, Y, IY - add.d T2, IY, INCY - fldx.d $f5, Y, T2 - add.d T2, T2, INCY - fldx.d $f6, Y, T2 - add.d T2, T2, INCY - fldx.d $f7, Y, T2 - - add.d T2, T2, INCY - fldx.d $f8, Y, T2 - add.d T2, T2, INCY - fldx.d $f9, Y, T2 - add.d T2, T2, INCY - fldx.d $f10, Y, T2 - add.d T2, T2, INCY - fldx.d $f11, Y, T2 - - vextrins.d U4, U5, 0x10 - vextrins.d U6, U7, 0x10 - vextrins.d U8, U9, 0x10 - vextrins.d U10, U11, 0x10 + LOAD_Y_8 vfmadd.d U4, U3, U1, U4 vfmadd.d U6, U3, U14, U6 vfmadd.d U8, U3, U15, U8 vfmadd.d U10, U3, U16, U10 - vextrins.d U5, U4, 0x01 - vextrins.d U7, U6, 0x01 - vextrins.d U9, U8, 0x01 - vextrins.d U11, U10, 0x01 + STORE_Y_8 - fstx.d $f4, Y, IY - add.d T2, IY, INCY - fstx.d $f5, Y, T2 - add.d T2, T2, INCY - fstx.d $f6, Y, T2 - add.d T2, T2, INCY - fstx.d $f7, Y, T2 + alsl.d IY, INCY, IY, 3 - add.d T2, T2, INCY - fstx.d $f8, Y, T2 - add.d T2, T2, INCY - fstx.d $f9, Y, T2 - add.d T2, T2, INCY - fstx.d $f10, Y, T2 - add.d T2, T2, INCY - fstx.d $f11, Y, T2 - - slli.d T2, INCY, 3 - add.d IY, IY, T2 - - fldx.d $f4, X, IX - add.d T2, IX, INCX - fldx.d $f5, X, T2 - add.d T2, T2, INCX - fldx.d $f6, X, T2 - add.d T2, T2, INCX - fldx.d $f7, X, T2 - - add.d T2, T2, INCX - fldx.d $f8, X, T2 - add.d T2, T2, INCX - fldx.d $f9, X, T2 - add.d T2, T2, INCX - fldx.d $f10, X, T2 - add.d T2, T2, INCX - fldx.d $f11, X, T2 - - vextrins.d U4, U5, 0x10 - vextrins.d U6, 
U7, 0x10 - vextrins.d U8, U9, 0x10 - vextrins.d U10, U11, 0x10 - - vand.v $vr12, $vr2, $vr2 + LOAD_X_8 vfmadd.d U2, U1, U4, U2 - vfsub.d U2, U2, $vr12 vfmadd.d U2, U14, U6, U2 vfmadd.d U2, U15, U8, U2 vfmadd.d U2, U16, U10, U2 - vextrins.d U4, U2, 0x01 - - fadd.d $f2, $f2, $f4 - fadd.d $f2, $f2, $f12 - - vextrins.d U2, U2, 0x10 - - slli.d T2, INCX, 3 - add.d IX, IX, T2 + alsl.d IX, INCX, IX, 3 addi.d II, II, 64 addi.d I, I, 1 blt I, T0, .L02 + // Acc U2 + GACC vf, d, U4, U2 + vilvl.d U2, U4, U4 + .L03: /* &4 */ andi T0, J, 4 beq $r0, T0, .L04 @@ -417,4 +453,4 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi.d $sp, $sp, 88 jirl $r0, $r1, 0x0 - EPILOGUE \ No newline at end of file + EPILOGUE diff --git a/kernel/loongarch64/ssymv_L_lasx.S b/kernel/loongarch64/ssymv_L_lasx.S index 980c10fd74..21ffcec690 100644 --- a/kernel/loongarch64/ssymv_L_lasx.S +++ b/kernel/loongarch64/ssymv_L_lasx.S @@ -28,6 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ASSEMBLER #include "common.h" +#include "loongarch64_asm.S" /* Param */ #define M $r4 @@ -57,6 +58,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define T2 $r28 #define T3 $r29 #define T4 $r30 +#define T5 $r17 +#define T6 $r16 /* LSX vectors */ #define U0 $xr31 @@ -87,75 +90,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define a8 $f8 #define a9 $f9 - - PROLOGUE - - LDARG BUFFER, $sp, 0 - - addi.d $sp, $sp, -88 - - SDARG $r23, $sp, 0 - SDARG $r24, $sp, 8 - SDARG $r25, $sp, 16 - SDARG $r26, $sp, 32 - SDARG $r27, $sp, 40 - SDARG $r28, $sp, 48 - SDARG $r29, $sp, 56 - SDARG $r30, $sp, 64 - SDARG $r31, $sp, 72 - ST ALPHA, $sp, 80 - - xvldrepl.w VALPHA, $sp, 80 - - slli.d LDA, LDA, BASE_SHIFT - slli.d INCX, INCX, BASE_SHIFT - slli.d INCY, INCY, BASE_SHIFT - - bge $r0, M, .L999 - bge $r0, N, .L999 - - move J, $r0 - move JY, $r0 - move JX, $r0 - move AO1, A - - beq J, N, .L999 - -.L01: - MTC a2, $r0 //temp2 - fldx.s a6, X, JX - fmul.s a3, ALPHA, a6 //temp1 - xvreplve0.w U3, U3 - xvreplve0.w U2, U2 - - mul.w T0, J, LDA - slli.d T1, J, BASE_SHIFT - add.w T0, T0, T1 - fldx.s a6, AO1, T0 - fldx.s a4, Y, JY - fmadd.s a4, a3, a6, a4 - fstx.s a4, Y, JY - - move IY, JY - move IX, JX - addi.d II, J, 1 - move I, II - slli.d II, II, BASE_SHIFT - - sub.d T0, M, J - addi.d T0, T0, -1 - srai.d T0, T0, 3 - add.d T0, T0, J - addi.d T0, T0, 1 - beq I, T0, .L03 - bge I, T0, .L03 - - mul.w T1, J, LDA - add.d T1, T1, II - -.L02: /* /8 */ - xvldx U1, AO1, T1 - +.macro LOAD_Y_8 + beqz T5, .L01_Y_0 add.d T2, IY, INCY fldx.s $f4, Y, T2 add.d T2, T2, INCY @@ -180,11 +116,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vextrins.w $vr8, $vr9, 0x10 vextrins.w $vr8, $vr10, 0x20 vextrins.w $vr8, $vr11, 0x30 - xvpermi.q U4, U8, 0x02 - - xvfmadd.s U4, U3, U1, U4 - - xvpermi.d U8, U4, 0xee + xvpermi.q U4, U8, 0x02 + b .L01_Y_1 +.L01_Y_0: + add.d T3, IY, INCY + xvldx U4, Y, T3 +.L01_Y_1: +.endm + +.macro STORE_Y_8 + beqz T5, .L01_Y_2 + xvpermi.d U8, U4, 0xee vextrins.w $vr5, $vr4, 0x01 vextrins.w $vr6, $vr4, 0x02 vextrins.w $vr7, $vr4, 0x03 @@ -209,10 +151,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
fstx.s $f10, Y, T2 add.d T2, T2, INCY fstx.s $f11, Y, T2 - - slli.d T2, INCY, 3 - add.d IY, IY, T2 - + b .L01_Y_3 +.L01_Y_2: + xvstx U4, Y, T3 +.L01_Y_3: +.endm + +.macro LOAD_X_8 + beqz T6, .L01_X_0 add.d T2, IX, INCX fldx.s $f4, X, T2 add.d T2, T2, INCX @@ -238,39 +184,103 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vextrins.w $vr8, $vr10, 0x20 vextrins.w $vr8, $vr11, 0x30 xvpermi.q U4, U8, 0x02 + b .L01_X_1 +.L01_X_0: + add.d T3, IX, INCX + xvldx U4, X, T3 +.L01_X_1: +.endm + + PROLOGUE - xvand.v $xr12, $xr2, $xr2 + addi.d $sp, $sp, -88 - xvfmadd.s U2, U1, U4, U2 - xvfsub.s U2, U2, $xr12 + SDARG $r23, $sp, 0 + SDARG $r24, $sp, 8 + SDARG $r25, $sp, 16 + SDARG $r26, $sp, 32 + SDARG $r27, $sp, 40 + SDARG $r28, $sp, 48 + SDARG $r29, $sp, 56 + SDARG $r30, $sp, 64 + SDARG $r31, $sp, 72 + ST ALPHA, $sp, 80 - xvpickve.w U4, U2, 0x01 - xvpickve.w U5, U2, 0x02 - xvpickve.w U6, U2, 0x03 - xvpickve.w U7, U2, 0x04 - xvpickve.w U8, U2, 0x05 - xvpickve.w U9, U2, 0x06 - xvpickve.w U10, U2, 0x07 + xvldrepl.w VALPHA, $sp, 80 - fadd.s $f2, $f2, $f4 - fadd.s $f2, $f2, $f5 - fadd.s $f2, $f2, $f6 - fadd.s $f2, $f2, $f7 - fadd.s $f2, $f2, $f8 - fadd.s $f2, $f2, $f9 - fadd.s $f2, $f2, $f10 - fadd.s $f2, $f2, $f12 + addi.d T5, INCY, -1 + addi.d T6, INCX, -1 + slli.d LDA, LDA, BASE_SHIFT + slli.d INCX, INCX, BASE_SHIFT + slli.d INCY, INCY, BASE_SHIFT - xvreplve0.d U2, U2 + bge $r0, M, .L999 + bge $r0, N, .L999 + + move J, $r0 + move JY, $r0 + move JX, $r0 + move AO1, A - slli.d T2, INCX, 3 - add.d IX, IX, T2 + beq J, N, .L999 + +.L01: + xvxor.v U2, U2, U2 + fldx.s a6, X, JX + fmul.s a3, ALPHA, a6 //temp1 + xvreplve0.w U3, U3 + + mul.w T0, J, LDA + slli.d T1, J, BASE_SHIFT + add.w T0, T0, T1 + fldx.s a6, AO1, T0 + fldx.s a4, Y, JY + fmadd.s a4, a3, a6, a4 + fstx.s a4, Y, JY + + move IY, JY + move IX, JX + addi.d II, J, 1 + move I, II + slli.d II, II, BASE_SHIFT + + sub.d T0, M, J + addi.d T0, T0, -1 + srai.d T0, T0, 3 + add.d T0, T0, J + addi.d T0, T0, 1 + 
beq I, T0, .L03 + bge I, T0, .L03 + + mul.w T1, J, LDA + add.d T1, T1, II + +.L02: /* /8 */ + xvldx U1, AO1, T1 + + LOAD_Y_8 + + xvfmadd.s U4, U3, U1, U4 + + STORE_Y_8 + + alsl.d IY, INCY, IY, 3 + + LOAD_X_8 + + xvfmadd.s U2, U1, U4, U2 + + alsl.d IX, INCX, IX, 3 addi.d II, II, 32 addi.d T1, T1, 32 addi.d I, I, 1 blt I, T0, .L02 + //Acc U2 + GACC xvf, s, U4, U2 + xvreplve0.d U2, U4 + .L03: /* &4 */ sub.d T0, M, J addi.d T0, T0, -1 @@ -433,4 +443,4 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi.d $sp, $sp, 88 jirl $r0, $r1, 0x0 - EPILOGUE \ No newline at end of file + EPILOGUE diff --git a/kernel/loongarch64/ssymv_L_lsx.S b/kernel/loongarch64/ssymv_L_lsx.S index 949e9e9025..a98cad38bf 100644 --- a/kernel/loongarch64/ssymv_L_lsx.S +++ b/kernel/loongarch64/ssymv_L_lsx.S @@ -28,6 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ASSEMBLER #include "common.h" +#include "loongarch64_asm.S" /* Param */ #define M $r4 @@ -57,6 +58,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define T2 $r28 #define T3 $r29 #define T4 $r30 +#define T5 $r17 +#define T6 $r16 /* LSX vectors */ #define U0 $vr31 @@ -88,77 +91,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define a9 $f9 - PROLOGUE - - LDARG BUFFER, $sp, 0 - - addi.d $sp, $sp, -88 - - SDARG $r23, $sp, 0 - SDARG $r24, $sp, 8 - SDARG $r25, $sp, 16 - SDARG $r26, $sp, 32 - SDARG $r27, $sp, 40 - SDARG $r28, $sp, 48 - SDARG $r29, $sp, 56 - SDARG $r30, $sp, 64 - SDARG $r31, $sp, 72 - ST ALPHA, $sp, 80 - - vldrepl.w VALPHA, $sp, 80 - - slli.d LDA, LDA, BASE_SHIFT - slli.d INCX, INCX, BASE_SHIFT - slli.d INCY, INCY, BASE_SHIFT - - bge $r0, M, .L999 - bge $r0, N, .L999 - - move J, $r0 - move JY, $r0 - move JX, $r0 - move AO1, A - - beq J, N, .L999 - -.L01: - MTC a2, $r0 //temp2 - fldx.s a6, X, JX - fmul.s a3, ALPHA, a6 //temp1 - vpermi.w U3, U3, 0x00 - vpermi.w U2, U2, 0x00 - - mul.w T0, J, LDA - slli.d T1, J, BASE_SHIFT - add.w T0, T0, T1 - fldx.s a6, AO1, T0 - fldx.s a4, Y, JY - fmadd.s a4, a3, a6, a4 - fstx.s a4, Y, JY - - move IY, JY - move IX, JX - addi.d II, J, 1 - move I, II - slli.d II, II, BASE_SHIFT - - sub.d T0, M, J - addi.d T0, T0, -1 - srai.d T0, T0, 3 - add.d T0, T0, J - addi.d T0, T0, 1 - beq I, T0, .L03 - bge I, T0, .L03 - - mul.w T1, J, LDA - add.d T1, T1, II - -.L02: /* /8 */ - vldx U1, AO1, T1 - addi.d T1, T1, 16 - vldx U14, AO1, T1 - addi.d T1, T1, 16 - +.macro LOAD_Y_8 + beqz T5, .L01_Y_0 add.d T2, IY, INCY fldx.s $f4, Y, T2 add.d T2, T2, INCY @@ -183,10 +117,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vextrins.w U8, U9, 0x10 vextrins.w U8, U10, 0x20 vextrins.w U8, U11, 0x30 - - vfmadd.s U4, U3, U1, U4 - vfmadd.s U8, U3, U14, U8 - + b .L01_Y_1 +.L01_Y_0: + add.d T3, IY, INCY + vldx U4, Y, T3 + alsl.d T4, INCY, T3, 2 + vldx U8, Y, T4 +.L01_Y_1: +.endm + +.macro STORE_Y_8 + beqz T5, .L01_Y_2 vextrins.w U5, U4, 0x01 vextrins.w U6, U4, 0x02 vextrins.w U7, U4, 0x03 @@ -211,10 +152,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
fstx.s $f10, Y, T2 add.d T2, T2, INCY fstx.s $f11, Y, T2 - - slli.d T2, INCY, 3 - add.d IY, IY, T2 - + b .L01_Y_3 +.L01_Y_2: + vstx U4, Y, T3 + vstx U8, Y, T4 +.L01_Y_3: +.endm + +.macro LOAD_X_8 + beqz T6, .L01_X_0 add.d T2, IX, INCX fldx.s $f4, X, T2 add.d T2, T2, INCX @@ -239,31 +185,109 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vextrins.w $vr8, $vr9, 0x10 vextrins.w $vr8, $vr10, 0x20 vextrins.w $vr8, $vr11, 0x30 + b .L01_X_1 +.L01_X_0: + add.d T3, IX, INCX + vldx U4, X, T3 + alsl.d T4, INCX, T3, 2 + vldx U8, X, T4 +.L01_X_1: +.endm - vand.v $vr12, $vr2, $vr2 + PROLOGUE - vfmadd.s U2, U1, U4, U2 - vfsub.s U2, U2, $vr12 - vfmadd.s U2, U14, U8, U2 + addi.d $sp, $sp, -88 - vextrins.w U4, U2, 0x01 - vextrins.w U5, U2, 0x02 - vextrins.w U6, U2, 0x03 + SDARG $r23, $sp, 0 + SDARG $r24, $sp, 8 + SDARG $r25, $sp, 16 + SDARG $r26, $sp, 32 + SDARG $r27, $sp, 40 + SDARG $r28, $sp, 48 + SDARG $r29, $sp, 56 + SDARG $r30, $sp, 64 + SDARG $r31, $sp, 72 + ST ALPHA, $sp, 80 - fadd.s $f2, $f2, $f4 - fadd.s $f2, $f2, $f5 - fadd.s $f2, $f2, $f6 - fadd.s $f2, $f2, $f12 + vldrepl.w VALPHA, $sp, 80 - vpermi.w U2, U2, 0x00 + addi.d T5, INCY, -1 + addi.d T6, INCX, -1 + slli.d LDA, LDA, BASE_SHIFT + slli.d INCX, INCX, BASE_SHIFT + slli.d INCY, INCY, BASE_SHIFT + + bge $r0, M, .L999 + bge $r0, N, .L999 + + move J, $r0 + move JY, $r0 + move JX, $r0 + move AO1, A + + beq J, N, .L999 + +.L01: + vxor.v U2, U2, U2 + fldx.s a6, X, JX + fmul.s a3, ALPHA, a6 //temp1 + vpermi.w U3, U3, 0x00 + + mul.w T0, J, LDA + slli.d T1, J, BASE_SHIFT + add.w T0, T0, T1 + fldx.s a6, AO1, T0 + fldx.s a4, Y, JY + fmadd.s a4, a3, a6, a4 + fstx.s a4, Y, JY + + move IY, JY + move IX, JX + addi.d II, J, 1 + move I, II + slli.d II, II, BASE_SHIFT - slli.d T2, INCX, 3 - add.d IX, IX, T2 + sub.d T0, M, J + addi.d T0, T0, -1 + srai.d T0, T0, 3 + add.d T0, T0, J + addi.d T0, T0, 1 + beq I, T0, .L03 + bge I, T0, .L03 + + mul.w T1, J, LDA + add.d T1, T1, II + +.L02: /* /8 */ + vldx U1, AO1, 
T1 + addi.d T1, T1, 16 + vldx U14, AO1, T1 + addi.d T1, T1, 16 + + LOAD_Y_8 + + vfmadd.s U4, U3, U1, U4 + vfmadd.s U8, U3, U14, U8 + + STORE_Y_8 + + alsl.d IY, INCY, IY, 3 + + LOAD_X_8 + + vfmadd.s U2, U1, U4, U2 + vfmadd.s U2, U14, U8, U2 + + alsl.d IX, INCX, IX, 3 addi.d II, II, 32 addi.d I, I, 1 blt I, T0, .L02 + // Acc U2 + GACC vf, s, U4, U2 + vpermi.w U2, U4, 0 + .L03: /* &4 */ sub.d T0, M, J addi.d T0, T0, -1 @@ -426,4 +450,4 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi.d $sp, $sp, 88 jirl $r0, $r1, 0x0 - EPILOGUE \ No newline at end of file + EPILOGUE diff --git a/kernel/loongarch64/ssymv_U_lasx.S b/kernel/loongarch64/ssymv_U_lasx.S index bd6fd3dd7a..662f311d94 100644 --- a/kernel/loongarch64/ssymv_U_lasx.S +++ b/kernel/loongarch64/ssymv_U_lasx.S @@ -28,6 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ASSEMBLER #include "common.h" +#include "loongarch64_asm.S" /* Param */ #define M $r4 @@ -57,6 +58,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define T2 $r28 #define T3 $r29 #define T4 $r30 +#define T5 $r17 +#define T6 $r16 /* LSX vectors */ #define U0 $xr31 @@ -87,64 +90,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define a8 $f8 #define a9 $f9 - - PROLOGUE - - LDARG BUFFER, $sp, 0 - - addi.d $sp, $sp, -88 - - SDARG $r23, $sp, 0 - SDARG $r24, $sp, 8 - SDARG $r25, $sp, 16 - SDARG $r26, $sp, 32 - SDARG $r27, $sp, 40 - SDARG $r28, $sp, 48 - SDARG $r29, $sp, 56 - SDARG $r30, $sp, 64 - SDARG $r31, $sp, 72 - ST ALPHA, $sp, 80 - - xvldrepl.w VALPHA, $sp, 80 - - slli.d LDA, LDA, BASE_SHIFT - slli.d INCX, INCX, BASE_SHIFT - slli.d INCY, INCY, BASE_SHIFT - - bge $r0, M, .L999 - bge $r0, N, .L999 - - sub.d M1, M, N - - mul.d JY, M1, INCY - mul.d JX, M1, INCX - - move J, M1 - move AO1, A - - beq J, M, .L999 - -.L01: - MTC $f2, $r0 //temp2 - fldx.s $f6, X, JX - fmul.s $f3, ALPHA, $f6 //temp1 - xvreplve0.w U3, U3 - xvreplve0.w U2, U2 - - move IY, $r0 - move IX, $r0 - move II, $r0 - move I, $r0 - - srai.d T0, J, 3 - beq I, T0, .L03 - - mul.w T1, J, LDA - add.d T1, T1, II - -.L02: /* /8 */ - xvldx U1, AO1, T1 - +.macro LOAD_Y_8 + beqz T5, .L01_Y_0 fldx.s $f4, Y, IY add.d T2, IY, INCY fldx.s $f5, Y, T2 @@ -168,10 +115,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vextrins.w $vr8, $vr9, 0x10 vextrins.w $vr8, $vr10, 0x20 vextrins.w $vr8, $vr11, 0x30 - xvpermi.q U4, U8, 0x02 - - xvfmadd.s U4, U3, U1, U4 - + xvpermi.q U4, U8, 0x02 + b .L01_Y_1 +.L01_Y_0: + xvldx U4, Y, IY +.L01_Y_1: +.endm + +.macro STORE_Y_8 + beqz T5, .L01_Y_2 xvpermi.d U8, U4, 0xee vextrins.w $vr5, $vr4, 0x01 vextrins.w $vr6, $vr4, 0x02 @@ -196,10 +148,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fstx.s $f10, Y, T2 add.d T2, T2, INCY fstx.s $f11, Y, T2 - - slli.d T2, INCY, 3 - add.d IY, IY, T2 - + b .L01_Y_3 +.L01_Y_2: + xvstx U4, Y, IY +.L01_Y_3: +.endm + +.macro LOAD_X_8 + beqz T6, .L01_X_0 fldx.s $f4, X, IX add.d T2, IX, INCX fldx.s $f5, X, T2 @@ -224,39 +180,91 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
vextrins.w $vr8, $vr10, 0x20 vextrins.w $vr8, $vr11, 0x30 xvpermi.q U4, U8, 0x02 + b .L01_X_1 +.L01_X_0: + xvldx U4, X, IX +.L01_X_1: +.endm + + PROLOGUE - xvand.v $xr12, $xr2, $xr2 + addi.d $sp, $sp, -88 - xvfmadd.s U2, U1, U4, U2 - xvfsub.s U2, U2, $xr12 + SDARG $r23, $sp, 0 + SDARG $r24, $sp, 8 + SDARG $r25, $sp, 16 + SDARG $r26, $sp, 32 + SDARG $r27, $sp, 40 + SDARG $r28, $sp, 48 + SDARG $r29, $sp, 56 + SDARG $r30, $sp, 64 + SDARG $r31, $sp, 72 + ST ALPHA, $sp, 80 - xvpickve.w U4, U2, 0x01 - xvpickve.w U5, U2, 0x02 - xvpickve.w U6, U2, 0x03 - xvpickve.w U7, U2, 0x04 - xvpickve.w U8, U2, 0x05 - xvpickve.w U9, U2, 0x06 - xvpickve.w U10, U2, 0x07 + xvldrepl.w VALPHA, $sp, 80 - fadd.s $f2, $f2, $f4 - fadd.s $f2, $f2, $f5 - fadd.s $f2, $f2, $f6 - fadd.s $f2, $f2, $f7 - fadd.s $f2, $f2, $f8 - fadd.s $f2, $f2, $f9 - fadd.s $f2, $f2, $f10 - fadd.s $f2, $f2, $f12 + addi.d T5, INCY, -1 + addi.d T6, INCX, -1 + slli.d LDA, LDA, BASE_SHIFT + slli.d INCX, INCX, BASE_SHIFT + slli.d INCY, INCY, BASE_SHIFT - xvreplve0.d U2, U2 + bge $r0, M, .L999 + bge $r0, N, .L999 + + sub.d M1, M, N + + mul.d JY, M1, INCY + mul.d JX, M1, INCX + + move J, M1 + move AO1, A + + beq J, M, .L999 + +.L01: + xvxor.v U2, U2, U2 + fldx.s $f6, X, JX + fmul.s $f3, ALPHA, $f6 //temp1 + xvreplve0.w U3, U3 - slli.d T2, INCX, 3 - add.d IX, IX, T2 + move IY, $r0 + move IX, $r0 + move II, $r0 + move I, $r0 + + srai.d T0, J, 3 + beq I, T0, .L03 + + mul.w T1, J, LDA + add.d T1, T1, II + +.L02: /* /8 */ + xvldx U1, AO1, T1 + + LOAD_Y_8 + + xvfmadd.s U4, U3, U1, U4 + + STORE_Y_8 + + alsl.d IY, INCY, IY, 3 + + LOAD_X_8 + + xvfmadd.s U2, U1, U4, U2 + + alsl.d IX, INCX, IX, 3 addi.d II, II, 32 addi.d T1, T1, 32 addi.d I, I, 1 blt I, T0, .L02 + //Acc U2 + GACC xvf, s, U4, U2 + xvreplve0.d U2, U4 + .L03: /* &4 */ andi T0, J, 4 beq $r0, T0, .L04 @@ -421,4 +429,4 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
addi.d $sp, $sp, 88 jirl $r0, $r1, 0x0 - EPILOGUE \ No newline at end of file + EPILOGUE diff --git a/kernel/loongarch64/ssymv_U_lsx.S b/kernel/loongarch64/ssymv_U_lsx.S index f3898e1483..7ff9b9b7b3 100644 --- a/kernel/loongarch64/ssymv_U_lsx.S +++ b/kernel/loongarch64/ssymv_U_lsx.S @@ -28,6 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ASSEMBLER #include "common.h" +#include "loongarch64_asm.S" /* Param */ #define M $r4 @@ -57,6 +58,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define T2 $r28 #define T3 $r29 #define T4 $r30 +#define T5 $r17 +#define T6 $r16 /* LSX vectors */ #define U0 $vr31 @@ -87,67 +90,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define a8 $f8 #define a9 $f9 - - PROLOGUE - - LDARG BUFFER, $sp, 0 - - addi.d $sp, $sp, -88 - - SDARG $r23, $sp, 0 - SDARG $r24, $sp, 8 - SDARG $r25, $sp, 16 - SDARG $r26, $sp, 32 - SDARG $r27, $sp, 40 - SDARG $r28, $sp, 48 - SDARG $r29, $sp, 56 - SDARG $r30, $sp, 64 - SDARG $r31, $sp, 72 - ST ALPHA, $sp, 80 - - vldrepl.w VALPHA, $sp, 80 - - slli.d LDA, LDA, BASE_SHIFT - slli.d INCX, INCX, BASE_SHIFT - slli.d INCY, INCY, BASE_SHIFT - - bge $r0, M, .L999 - bge $r0, N, .L999 - - sub.d M1, M, N - - mul.d JY, M1, INCY - mul.d JX, M1, INCX - - move J, M1 - move AO1, A - - beq J, M, .L999 - -.L01: - MTC $f2, $r0 //temp2 - fldx.s $f6, X, JX - fmul.s $f3, ALPHA, $f6 //temp1 - vpermi.w U3, U3, 0x00 - vpermi.w U2, U2, 0x00 - - move IY, $r0 - move IX, $r0 - move II, $r0 - move I, $r0 - - srai.d T0, J, 3 - beq I, T0, .L03 - - mul.w T1, J, LDA - add.d T1, T1, II - -.L02: /* /8 */ - vldx U1, AO1, T1 - addi.d T1, T1, 16 - vldx U14, AO1, T1 - addi.d T1, T1, 16 - +.macro LOAD_Y_8 + beqz T5, .L01_Y_0 fldx.s $f4, Y, IY add.d T2, IY, INCY fldx.s $f5, Y, T2 @@ -171,10 +115,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
vextrins.w U8, U9, 0x10 vextrins.w U8, U10, 0x20 vextrins.w U8, U11, 0x30 - - vfmadd.s U4, U3, U1, U4 - vfmadd.s U8, U3, U14, U8 - + b .L01_Y_1 +.L01_Y_0: + vldx U4, Y, IY + alsl.d T2, INCY, IY, 2 + vldx U8, Y, T2 +.L01_Y_1: +.endm + +.macro STORE_Y_8 + beqz T5, .L01_Y_2 vextrins.w U5, U4, 0x01 vextrins.w U6, U4, 0x02 vextrins.w U7, U4, 0x03 @@ -198,10 +148,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fstx.s $f10, Y, T2 add.d T2, T2, INCY fstx.s $f11, Y, T2 - - slli.d T2, INCY, 3 - add.d IY, IY, T2 - + b .L01_Y_3 +.L01_Y_2: + vstx U4, Y, IY + vstx U8, Y, T2 +.L01_Y_3: +.endm + +.macro LOAD_X_8 + beqz T6, .L01_X_0 fldx.s $f4, X, IX add.d T2, IX, INCX fldx.s $f5, X, T2 @@ -225,31 +180,97 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vextrins.w $vr8, $vr9, 0x10 vextrins.w $vr8, $vr10, 0x20 vextrins.w $vr8, $vr11, 0x30 + b .L01_X_1 +.L01_X_0: + vldx U4, X, IX + alsl.d T3, INCX, IX, 2 + vldx U8, X, T3 +.L01_X_1: +.endm - vand.v $vr12, $vr2, $vr2 + PROLOGUE - vfmadd.s U2, U1, U4, U2 - vfsub.s U2, U2, $vr12 - vfmadd.s U2, U14, U8, U2 + addi.d $sp, $sp, -88 - vextrins.w U4, U2, 0x01 - vextrins.w U5, U2, 0x02 - vextrins.w U6, U2, 0x03 + SDARG $r23, $sp, 0 + SDARG $r24, $sp, 8 + SDARG $r25, $sp, 16 + SDARG $r26, $sp, 32 + SDARG $r27, $sp, 40 + SDARG $r28, $sp, 48 + SDARG $r29, $sp, 56 + SDARG $r30, $sp, 64 + SDARG $r31, $sp, 72 + ST ALPHA, $sp, 80 - fadd.s $f2, $f2, $f4 - fadd.s $f2, $f2, $f5 - fadd.s $f2, $f2, $f6 - fadd.s $f2, $f2, $f12 + vldrepl.w VALPHA, $sp, 80 - vpermi.w U2, U2, 0x00 + addi.d T5, INCY, -1 + addi.d T6, INCX, -1 + slli.d LDA, LDA, BASE_SHIFT + slli.d INCX, INCX, BASE_SHIFT + slli.d INCY, INCY, BASE_SHIFT + + bge $r0, M, .L999 + bge $r0, N, .L999 + + sub.d M1, M, N + + mul.d JY, M1, INCY + mul.d JX, M1, INCX + + move J, M1 + move AO1, A + + beq J, M, .L999 + +.L01: + vxor.v U2, U2, U2 + fldx.s $f6, X, JX + fmul.s $f3, ALPHA, $f6 //temp1 + vpermi.w U3, U3, 0x00 + + move IY, $r0 + move IX, $r0 
+ move II, $r0 + move I, $r0 + + srai.d T0, J, 3 + beq I, T0, .L03 + + mul.w T1, J, LDA + add.d T1, T1, II - slli.d T2, INCX, 3 - add.d IX, IX, T2 +.L02: /* /8 */ + vldx U1, AO1, T1 + addi.d T1, T1, 16 + vldx U14, AO1, T1 + addi.d T1, T1, 16 + + LOAD_Y_8 + + vfmadd.s U4, U3, U1, U4 + vfmadd.s U8, U3, U14, U8 + + STORE_Y_8 + + alsl.d IY, INCY, IY, 3 + + LOAD_X_8 + + vfmadd.s U2, U1, U4, U2 + vfmadd.s U2, U14, U8, U2 + + alsl.d IX, INCX, IX, 3 addi.d II, II, 32 addi.d I, I, 1 blt I, T0, .L02 + // Acc U2 + GACC vf, s, U4, U2 + vpermi.w U2, U4, 0x00 + .L03: /* &4 */ andi T0, J, 4 beq $r0, T0, .L04 @@ -414,4 +435,4 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. addi.d $sp, $sp, 88 jirl $r0, $r1, 0x0 - EPILOGUE \ No newline at end of file + EPILOGUE diff --git a/kernel/mips/KERNEL b/kernel/mips/KERNEL index aeccfbf4c8..a6ad0bf028 100644 --- a/kernel/mips/KERNEL +++ b/kernel/mips/KERNEL @@ -43,4 +43,14 @@ ifndef ZGEMM_BETA ZGEMM_BETA = ../generic/zgemm_beta.c endif +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/mips/KERNEL.generic b/kernel/mips/KERNEL.generic index 17f2ef976b..1f03c65942 100644 --- a/kernel/mips/KERNEL.generic +++ b/kernel/mips/KERNEL.generic @@ -158,3 +158,15 @@ ZHEMV_L_KERNEL = ../generic/zhemv_k.c CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/mips64/KERNEL b/kernel/mips64/KERNEL index 54939a9efe..2ebd8a5bda 100644 --- a/kernel/mips64/KERNEL +++ b/kernel/mips64/KERNEL @@ -199,3 +199,15 @@ endif ifndef IQMAXKERNEL IQMAXKERNEL = imax.S endif + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL 
+DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/mips64/KERNEL.generic b/kernel/mips64/KERNEL.generic index 17f2ef976b..1f03c65942 100644 --- a/kernel/mips64/KERNEL.generic +++ b/kernel/mips64/KERNEL.generic @@ -158,3 +158,15 @@ ZHEMV_L_KERNEL = ../generic/zhemv_k.c CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/power/KERNEL b/kernel/power/KERNEL index 9070450f4b..45fe0dd292 100644 --- a/kernel/power/KERNEL +++ b/kernel/power/KERNEL @@ -73,3 +73,15 @@ endif ifndef IQMAXKERNEL IQMAXKERNEL = imax.S endif + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/power/scal.S b/kernel/power/scal.S index 5e92a88aa1..eceb9fe8ed 100644 --- a/kernel/power/scal.S +++ b/kernel/power/scal.S @@ -59,7 +59,7 @@ #if !defined(__64BIT__) && defined(DOUBLE) #define X r8 #define INCX r9 -#define FLAG r13 +#define FLAG r11 #else #define X r7 #define INCX r8 @@ -91,7 +91,7 @@ fcmpu cr0, FZERO, ALPHA bne- cr0, LL(A1I1) - ld FLAG, 48+64+8(SP) + LDLONG FLAG, 48+64+8(SP) cmpwi cr0, FLAG, 1 beq- cr0, LL(A1I1) diff --git a/kernel/riscv64/KERNEL b/kernel/riscv64/KERNEL index 68d68b5f86..cd94052035 100644 --- a/kernel/riscv64/KERNEL +++ b/kernel/riscv64/KERNEL @@ -27,4 +27,14 @@ ifndef ZGEMM_BETA ZGEMM_BETA = ../generic/zgemm_beta.c endif +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/riscv64/KERNEL.C910V b/kernel/riscv64/KERNEL.C910V index 2798a870ed..666b3cc5e9 100644 --- 
a/kernel/riscv64/KERNEL.C910V +++ b/kernel/riscv64/KERNEL.C910V @@ -71,6 +71,10 @@ DROTKERNEL = rot_vector.c CROTKERNEL = zrot_vector.c ZROTKERNEL = zrot_vector.c +SROTMKERNEL = ../generic/rotm.c +DROTMKERNEL = ../generic/rotm.c +QROTMKERNEL = ../generic/rotm.c + SSCALKERNEL = scal_vector.c DSCALKERNEL = scal_vector.c CSCALKERNEL = zscal_vector.c diff --git a/kernel/riscv64/KERNEL.RISCV64_GENERIC b/kernel/riscv64/KERNEL.RISCV64_GENERIC index 67f81cacda..cf7d15d36f 100644 --- a/kernel/riscv64/KERNEL.RISCV64_GENERIC +++ b/kernel/riscv64/KERNEL.RISCV64_GENERIC @@ -71,6 +71,10 @@ DROTKERNEL = ../riscv64/rot.c CROTKERNEL = ../riscv64/zrot.c ZROTKERNEL = ../riscv64/zrot.c +SROTMKERNEL = ../generic/rotm.c +DROTMKERNEL = ../generic/rotm.c +QROTMKERNEL = ../generic/rotm.c + SSCALKERNEL = ../riscv64/scal.c DSCALKERNEL = ../riscv64/scal.c CSCALKERNEL = ../riscv64/zscal.c diff --git a/kernel/riscv64/KERNEL.RISCV64_ZVL128B b/kernel/riscv64/KERNEL.RISCV64_ZVL128B index fec69ee094..7fbc26d213 100644 --- a/kernel/riscv64/KERNEL.RISCV64_ZVL128B +++ b/kernel/riscv64/KERNEL.RISCV64_ZVL128B @@ -71,6 +71,10 @@ DROTKERNEL = rot_rvv.c CROTKERNEL = zrot_rvv.c ZROTKERNEL = zrot_rvv.c +SROTMKERNEL = ../generic/rotm.c +DROTMKERNEL = ../generic/rotm.c +QROTMKERNEL = ../generic/rotm.c + SSCALKERNEL = scal_rvv.c DSCALKERNEL = scal_rvv.c CSCALKERNEL = zscal_rvv.c diff --git a/kernel/riscv64/KERNEL.RISCV64_ZVL256B b/kernel/riscv64/KERNEL.RISCV64_ZVL256B index d8690682f4..9915fd9496 100644 --- a/kernel/riscv64/KERNEL.RISCV64_ZVL256B +++ b/kernel/riscv64/KERNEL.RISCV64_ZVL256B @@ -66,6 +66,10 @@ DROTKERNEL = rot_vector.c CROTKERNEL = zrot_vector.c ZROTKERNEL = zrot_vector.c +SROTMKERNEL = ../generic/rotm.c +DROTMKERNEL = ../generic/rotm.c +QROTMKERNEL = ../generic/rotm.c + SSCALKERNEL = scal_vector.c DSCALKERNEL = scal_vector.c CSCALKERNEL = zscal_vector.c diff --git a/kernel/riscv64/KERNEL.x280 b/kernel/riscv64/KERNEL.x280 index 86708fe015..18515e812f 100644 --- a/kernel/riscv64/KERNEL.x280 +++ 
b/kernel/riscv64/KERNEL.x280 @@ -98,6 +98,10 @@ DROTKERNEL = rot_rvv.c CROTKERNEL = zrot_rvv.c ZROTKERNEL = zrot_rvv.c +SROTMKERNEL = rotm_rvv.c +DROTMKERNEL = rotm_rvv.c +QROTMKERNEL = ../generic/rotm.c + SSCALKERNEL = scal_rvv.c DSCALKERNEL = scal_rvv.c CSCALKERNEL = zscal_rvv.c @@ -279,3 +283,9 @@ endif ifndef ZGEMM_BETA ZGEMM_BETA = zgemm_beta_rvv.c endif + +ZOMATCOPY_CN = zomatcopy_cn_rvv.c +COMATCOPY_CN = zomatcopy_cn_rvv.c + +DOMATCOPY_CN = omatcopy_cn_rvv.c +SOMATCOPY_CN = omatcopy_cn_rvv.c \ No newline at end of file diff --git a/kernel/riscv64/gemm_tcopy_8_rvv.c b/kernel/riscv64/gemm_tcopy_8_rvv.c index 4742ae6a75..c50b0d5b42 100644 --- a/kernel/riscv64/gemm_tcopy_8_rvv.c +++ b/kernel/riscv64/gemm_tcopy_8_rvv.c @@ -28,35 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) __riscv_vsetvl_e32m1(n) -#define FLOAT_V_T vfloat32m1_t -#define FLOAT_VX2_T vfloat32m1x2_t -#define FLOAT_VX4_T vfloat32m1x4_t -#define FLOAT_VX8_T vfloat32m1x8_t -#define VLEV_FLOAT __riscv_vle32_v_f32m1 -#define VLSEV_FLOAT __riscv_vlse32_v_f32m1 -#define VSEV_FLOAT __riscv_vse32_v_f32m1 -#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m1x2 -#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1x2 -#define VLSSEG4_FLOAT __riscv_vlsseg4e32_v_f32m1x4 -#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1x4 -#define VLSSEG8_FLOAT __riscv_vlsseg8e32_v_f32m1x8 -#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1x8 +#define FLOAT_V_T vfloat32m2_t +#define FLOAT_V_T_HALF vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VLEV_FLOAT_HALF __riscv_vle32_v_f32m1 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VSEV_FLOAT_HALF __riscv_vse32_v_f32m1 #else -#define VSETVL(n) __riscv_vsetvl_e64m1(n) -#define FLOAT_V_T vfloat64m1_t -#define FLOAT_VX2_T vfloat64m1x2_t -#define FLOAT_VX4_T vfloat64m1x4_t -#define FLOAT_VX8_T vfloat64m1x8_t -#define VLEV_FLOAT __riscv_vle64_v_f64m1 -#define VLSEV_FLOAT 
__riscv_vlse64_v_f64m1 -#define VSEV_FLOAT __riscv_vse64_v_f64m1 -#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m1x2 -#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1x2 -#define VLSSEG4_FLOAT __riscv_vlsseg4e64_v_f64m1x4 -#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1x4 -#define VLSSEG8_FLOAT __riscv_vlsseg8e64_v_f64m1x8 -#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1x8 +#define FLOAT_V_T vfloat64m4_t +#define FLOAT_V_T_HALF vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m4 +#define VLEV_FLOAT_HALF __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m4 +#define VSEV_FLOAT_HALF __riscv_vse64_v_f64m2 #endif int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) @@ -69,9 +53,7 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) IFLOAT *boffset, *boffset1, *boffset2, *boffset3, *boffset4; FLOAT_V_T v0; - FLOAT_VX2_T vx2; - FLOAT_VX4_T vx4; - FLOAT_VX8_T vx8; + FLOAT_V_T_HALF v1; // fprintf(stderr, "gemm_tcopy_8 m=%ld n=%ld lda=%ld\n", m, n, lda); @@ -81,156 +63,12 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) boffset3 = b + m * (n & ~3); boffset4 = b + m * (n & ~1); - for(j = (m >> 3); j > 0; j--) { - - aoffset1 = aoffset; - aoffset += 8 * lda; - - boffset1 = boffset; - boffset += 64; - - for(i = (n >> 3); i > 0; i--) { - size_t vl = 8; - - vx8 = VLSSEG8_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSSEG8_FLOAT(boffset1, vx8, vl); - - aoffset1 += 8; - boffset1 += m * 8; - } - - if (n & 4) { - size_t vl = 8; - - vx4 = VLSSEG4_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSSEG4_FLOAT(boffset2, vx4, vl); - - aoffset1 += 4; - boffset2 += 32; - } - - if (n & 2) { - size_t vl = 8; - - vx2 = VLSSEG2_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSSEG2_FLOAT(boffset3, vx2, vl); - - aoffset1 += 2; - boffset3 += 16; - } - - if (n & 1) { - size_t vl = 8; - - v0 = VLSEV_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSEV_FLOAT(boffset4, v0, vl); - - aoffset1 += 1; - boffset4 += 8; - } - - } - - if (m & 4) { - - 
aoffset1 = aoffset; - aoffset += 4 * lda; - - boffset1 = boffset; - boffset += 32; - - for(i = (n >> 3); i > 0; i--) { - size_t vl = 4; - - vx8 = VLSSEG8_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSSEG8_FLOAT(boffset1, vx8, vl); - - aoffset1 += 8; - boffset1 += m * 8; - } - - if (n & 4) { - size_t vl = 4; - - vx4 = VLSSEG4_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSSEG4_FLOAT(boffset2, vx4, vl); - - aoffset1 += 4; - boffset2 += 16; - } - - if (n & 2) { - size_t vl = 4; - - vx2 = VLSSEG2_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSSEG2_FLOAT(boffset3, vx2, vl); - - aoffset1 += 2; - boffset3 += 8; - } - - if (n & 1) { - size_t vl = 4; - - v0 = VLSEV_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSEV_FLOAT(boffset4, v0, vl); - - aoffset1 += 1; - boffset4 += 4; - } - } - - if (m & 2) { + for(j = m; j > 0; j--) { aoffset1 = aoffset; - aoffset += 2 * lda; - boffset1 = boffset; - boffset += 16; - - for(i = (n >> 3); i > 0; i--) { - size_t vl = 2; - vx8 = VLSSEG8_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSSEG8_FLOAT(boffset1, vx8, vl); - - aoffset1 += 8; - boffset1 += m * 8; - } - - if (n & 4) { - size_t vl = 2; - - vx4 = VLSSEG4_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSSEG4_FLOAT(boffset2, vx4, vl); - - aoffset1 += 4; - boffset2 += 8; - } - - if (n & 2) { - size_t vl = 2; - - vx2 = VLSSEG2_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSSEG2_FLOAT(boffset3, vx2, vl); - - aoffset1 += 2; - boffset3 += 4; - } - - if (n & 1) { - size_t vl = 2; - - v0 = VLSEV_FLOAT(aoffset1, lda * sizeof(FLOAT), vl); - VSEV_FLOAT(boffset4, v0, vl); - - aoffset1 += 1; - boffset4 += 2; - } - } - - if (m & 1) { - aoffset1 = aoffset; - boffset1 = boffset; + aoffset += lda; + boffset += 8; for(i = (n >> 3); i > 0; i--) { size_t vl = 8; @@ -245,27 +83,25 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) if (n & 4) { size_t vl = 4; - v0 = VLEV_FLOAT(aoffset1, vl); - VSEV_FLOAT(boffset2, v0, vl); + v1 = VLEV_FLOAT_HALF(aoffset1, vl); + VSEV_FLOAT_HALF(boffset2, v1, 
vl); aoffset1 += 4; - //boffset2 += 4; + boffset2 += 4; } if (n & 2) { - size_t vl = 2; - - v0 = VLEV_FLOAT(aoffset1, vl); - VSEV_FLOAT(boffset3, v0, vl); + *(boffset3) = *(aoffset1); + *(boffset3 + 1) = *(aoffset1 + 1); aoffset1 += 2; - // boffset3 += 2; + boffset3 += 2; } if (n & 1) { - *(boffset4) = *(aoffset1); - // aoffset1 ++; - // boffset4 ++; + *(boffset4) = *(aoffset1); + aoffset1 ++; + boffset4 ++; } } diff --git a/kernel/riscv64/nrm2_rvv.c b/kernel/riscv64/nrm2_rvv.c index 14ed68b0a0..472b1148eb 100644 --- a/kernel/riscv64/nrm2_rvv.c +++ b/kernel/riscv64/nrm2_rvv.c @@ -27,185 +27,223 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(DOUBLE) -#define VSETVL __riscv_vsetvl_e64m4 -#define FLOAT_V_T vfloat64m4_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLEV_FLOAT __riscv_vle64_v_f64m4 -#define VLSEV_FLOAT __riscv_vlse64_v_f64m4 -#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 -#define VFMVSF_FLOAT __riscv_vfmv_s_f_f64m4 -#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 -#define MASK_T vbool16_t -#define VFABS __riscv_vfabs_v_f64m4 -#define VMFNE __riscv_vmfne_vf_f64m4_b16 -#define VMFGT __riscv_vmfgt_vv_f64m4_b16 -#define VMFEQ __riscv_vmfeq_vf_f64m4_b16 -#define VCPOP __riscv_vcpop_m_b16 -#define VFREDMAX __riscv_vfredmax_vs_f64m4_f64m1 -#define VFREDMIN __riscv_vfredmin_vs_f64m4_f64m1 -#define VFIRST __riscv_vfirst_m_b16 -#define VRGATHER __riscv_vrgather_vx_f64m4 -#define VFDIV __riscv_vfdiv_vv_f64m4 -#define VFDIV_M __riscv_vfdiv_vv_f64m4_mu -#define VFMUL __riscv_vfmul_vv_f64m4 -#define VFMUL_M __riscv_vfmul_vv_f64m4_mu -#define VFMACC __riscv_vfmacc_vv_f64m4 -#define VFMACC_M __riscv_vfmacc_vv_f64m4_mu -#define VMSBF __riscv_vmsbf_m_b16 -#define VMSOF __riscv_vmsof_m_b16 -#define VMAND __riscv_vmand_mm_b16 -#define VMANDN __riscv_vmand_mm_b16 -#define VFREDSUM __riscv_vfredusum_vs_f64m4_f64m1 -#define VMERGE __riscv_vmerge_vvm_f64m4 -#define VSEV_FLOAT __riscv_vse64_v_f64m4 -#define 
EXTRACT_FLOAT0_V(v) __riscv_vfmv_f_s_f64m4_f64(v) -#define ABS fabs -#else -#define VSETVL __riscv_vsetvl_e32m4 +#if !defined(DOUBLE) +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m4() #define FLOAT_V_T vfloat32m4_t #define FLOAT_V_T_M1 vfloat32m1_t +#define MASK_T vbool8_t #define VLEV_FLOAT __riscv_vle32_v_f32m4 #define VLSEV_FLOAT __riscv_vlse32_v_f32m4 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1_tu +#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f32m4_tu #define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4 -#define VFMVSF_FLOAT __riscv_vfmv_s_f_f32m4 #define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 -#define MASK_T vbool8_t -#define VFABS __riscv_vfabs_v_f32m4 -#define VMFNE __riscv_vmfne_vf_f32m4_b8 -#define VMFGT __riscv_vmfgt_vv_f32m4_b8 -#define VMFEQ __riscv_vmfeq_vf_f32m4_b8 -#define VCPOP __riscv_vcpop_m_b8 -#define VFREDMAX __riscv_vfredmax_vs_f32m4_f32m1 -#define VFREDMIN __riscv_vfredmin_vs_f32m4_f32m1 -#define VFIRST __riscv_vfirst_m_b8 -#define VRGATHER __riscv_vrgather_vx_f32m4 -#define VFDIV __riscv_vfdiv_vv_f32m4 -#define VFDIV_M __riscv_vfdiv_vv_f32m4_mu -#define VFMUL __riscv_vfmul_vv_f32m4 -#define VFMUL_M __riscv_vfmul_vv_f32m4_mu -#define VFMACC __riscv_vfmacc_vv_f32m4 -#define VFMACC_M __riscv_vfmacc_vv_f32m4_mu -#define VMSBF __riscv_vmsbf_m_b8 -#define VMSOF __riscv_vmsof_m_b8 -#define VMAND __riscv_vmand_mm_b8 -#define VMANDN __riscv_vmand_mm_b8 -#define VFREDSUM __riscv_vfredusum_vs_f32m4_f32m1 -#define VMERGE __riscv_vmerge_vvm_f32m4 -#define VSEV_FLOAT __riscv_vse32_v_f32m4 -#define EXTRACT_FLOAT0_V(v) __riscv_vfmv_f_s_f32m4_f32(v) +#define VMFIRSTM __riscv_vfirst_m_b8 +#define VFREDMAXVS_FLOAT_TU __riscv_vfredmax_vs_f32m4_f32m1_tu +#define VFMVFS_FLOAT __riscv_vfmv_f_s_f32m1_f32 +#define VMFGTVF_FLOAT __riscv_vmfgt_vf_f32m4_b8 +#define VFDIVVF_FLOAT __riscv_vfdiv_vf_f32m4 +#define VFABSV_FLOAT __riscv_vfabs_v_f32m4 #define ABS fabsf +#else +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define 
VSETVL_MAX __riscv_vsetvlmax_e64m4() +#define FLOAT_V_T vfloat64m4_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define MASK_T vbool16_t +#define VLEV_FLOAT __riscv_vle64_v_f64m4 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m4 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1_tu +#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f64m4_tu +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VMFIRSTM __riscv_vfirst_m_b16 +#define VFREDMAXVS_FLOAT_TU __riscv_vfredmax_vs_f64m4_f64m1_tu +#define VFMVFS_FLOAT __riscv_vfmv_f_s_f64m1_f64 +#define VMFGTVF_FLOAT __riscv_vmfgt_vf_f64m4_b16 +#define VFDIVVF_FLOAT __riscv_vfdiv_vf_f64m4 +#define VFABSV_FLOAT __riscv_vfabs_v_f64m4 +#define ABS fabs #endif FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) { - BLASLONG i=0; - - if (n <= 0 || inc_x == 0) return(0.0); - if(n == 1) return (ABS(x[0])); - - unsigned int gvl = 0; - - MASK_T nonzero_mask; - MASK_T scale_mask; - - gvl = VSETVL(n); - FLOAT_V_T v0; - FLOAT_V_T v_ssq = VFMVVF_FLOAT(0, gvl); - FLOAT_V_T v_scale = VFMVVF_FLOAT(0, gvl); - - FLOAT scale = 0; - FLOAT ssq = 0; - unsigned int stride_x = inc_x * sizeof(FLOAT); - int idx = 0; - - if( n >= gvl && inc_x > 0 ) // don't pay overheads if we're not doing useful work - { - for(i=0; i 0 ){ + FLOAT_V_T vr, v0, v_zero; + unsigned int gvl = 0; + FLOAT_V_T_M1 v_res, v_z0; + gvl = VSETVL_MAX; + v_res = VFMVVF_FLOAT_M1(0, gvl); + v_z0 = VFMVVF_FLOAT_M1(0, gvl); + MASK_T mask; + BLASLONG index = 0; + + if (inc_x == 1) { + gvl = VSETVL(n); + vr = VFMVVF_FLOAT(0, gvl); + v_zero = VFMVVF_FLOAT(0, gvl); + for (i = 0, j = 0; i < n / gvl; i++) { + v0 = VLEV_FLOAT(&x[j], gvl); + // fabs(vector) + v0 = VFABSV_FLOAT(v0, gvl); + // if scale change + mask = VMFGTVF_FLOAT(v0, scale, gvl); + index = VMFIRSTM(mask, gvl); + if (index == -1) { // no elements greater than scale + if (scale != 0.0) { + v0 = VFDIVVF_FLOAT(v0, scale, gvl); + vr = VFMACCVV_FLOAT_TU(vr, v0, v0, gvl); + } + } + else { // found 
greater element + // ssq in vector vr: vr[0] + v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, gvl); + // total ssq before current vector + ssq += VFMVFS_FLOAT(v_res); + // find max + v_res = VFREDMAXVS_FLOAT_TU(v_res, v0, v_z0, gvl); + // update ssq before max_index + ssq = ssq * (scale / VFMVFS_FLOAT(v_res)) * (scale / VFMVFS_FLOAT(v_res)); + // update scale + scale = VFMVFS_FLOAT(v_res); + // ssq in vector vr + v0 = VFDIVVF_FLOAT(v0, scale, gvl); + vr = VFMACCVV_FLOAT_TU(v_zero, v0, v0, gvl); + } + j += gvl; + } + // ssq in vector vr: vr[0] + v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, gvl); + // total ssq now + ssq += VFMVFS_FLOAT(v_res); + + // tail processing + if(j < n){ + gvl = VSETVL(n-j); + v0 = VLEV_FLOAT(&x[j], gvl); + // fabs(vector) + v0 = VFABSV_FLOAT(v0, gvl); + // if scale change + mask = VMFGTVF_FLOAT(v0, scale, gvl); + index = VMFIRSTM(mask, gvl); + if (index == -1) { // no elements greater than scale + if(scale != 0.0) + v0 = VFDIVVF_FLOAT(v0, scale, gvl); + } else { // found greater element + // find max + v_res = VFREDMAXVS_FLOAT_TU(v_res, v0, v_z0, gvl); + // update ssq before max_index + ssq = ssq * (scale / VFMVFS_FLOAT(v_res))*(scale / VFMVFS_FLOAT(v_res)); + // update scale + scale = VFMVFS_FLOAT(v_res); + v0 = VFDIVVF_FLOAT(v0, scale, gvl); + } + vr = VFMACCVV_FLOAT_TU(v_zero, v0, v0, gvl); + // ssq in vector vr: vr[0] + v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, gvl); + // total ssq now + ssq += VFMVFS_FLOAT(v_res); + } + } + else { + gvl = VSETVL(n); + vr = VFMVVF_FLOAT(0, gvl); + v_zero = VFMVVF_FLOAT(0, gvl); + unsigned int stride_x = inc_x * sizeof(FLOAT); + int idx = 0, inc_v = inc_x * gvl; + for (i = 0, j = 0; i < n / gvl; i++) { + v0 = VLSEV_FLOAT(&x[idx], stride_x, gvl); + // fabs(vector) + v0 = VFABSV_FLOAT(v0, gvl); + // if scale change + mask = VMFGTVF_FLOAT(v0, scale, gvl); + index = VMFIRSTM(mask, gvl); + if (index == -1) {// no elements greater than scale + if(scale != 0.0){ + v0 = VFDIVVF_FLOAT(v0, scale, gvl); + vr = 
VFMACCVV_FLOAT_TU(vr, v0, v0, gvl); + } + } + else { // found greater element + // ssq in vector vr: vr[0] + v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, gvl); + // total ssq before current vector + ssq += VFMVFS_FLOAT(v_res); + // find max + v_res = VFREDMAXVS_FLOAT_TU(v_res, v0, v_z0, gvl); + // update ssq before max_index + ssq = ssq * (scale / VFMVFS_FLOAT(v_res))*(scale / VFMVFS_FLOAT(v_res)); + // update scale + scale = VFMVFS_FLOAT(v_res); + // ssq in vector vr + v0 = VFDIVVF_FLOAT(v0, scale, gvl); + vr = VFMACCVV_FLOAT_TU(v_zero, v0, v0, gvl); + } + j += gvl; + idx += inc_v; + } + // ssq in vector vr: vr[0] + v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, gvl); + // total ssq now + ssq += VFMVFS_FLOAT(v_res); + + // tail processing + if (j < n) { + gvl = VSETVL(n-j); + v0 = VLSEV_FLOAT(&x[idx], stride_x, gvl); + // fabs(vector) + v0 = VFABSV_FLOAT(v0, gvl); + // if scale change + mask = VMFGTVF_FLOAT(v0, scale, gvl); + index = VMFIRSTM(mask, gvl); + if(index == -1) { // no elements greater than scale + if(scale != 0.0) { + v0 = VFDIVVF_FLOAT(v0, scale, gvl); + vr = VFMACCVV_FLOAT_TU(v_zero, v0, v0, gvl); + } + } + else { // found greater element + // find max + v_res = VFREDMAXVS_FLOAT_TU(v_res, v0, v_z0, gvl); + // update ssq before max_index + ssq = ssq * (scale / VFMVFS_FLOAT(v_res))*(scale / VFMVFS_FLOAT(v_res)); + // update scale + scale = VFMVFS_FLOAT(v_res); + v0 = VFDIVVF_FLOAT(v0, scale, gvl); + vr = VFMACCVV_FLOAT_TU(v_zero, v0, v0, gvl); + } + // ssq in vector vr: vr[0] + v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, gvl); + // total ssq now + ssq += VFMVFS_FLOAT(v_res); + } + } + } + else{ + // using scalar ops when inc_x < 0 + n *= inc_x; while(abs(i) < abs(n)){ - if ( x[i] != 0.0 ){ - FLOAT absxi = ABS( x[i] ); - if ( scale < absxi ){ - ssq = 1 + ssq * ( scale / absxi ) * ( scale / absxi ); - scale = absxi ; - } - else{ - ssq += ( absxi/scale ) * ( absxi/scale ); - } - - } - - i += inc_x; + if ( x[i] != 0.0 ){ + FLOAT absxi = ABS( x[i] ); + if ( scale < absxi 
){ + ssq = 1 + ssq * ( scale / absxi ) * ( scale / absxi ); + scale = absxi ; + } + else{ + ssq += ( absxi/scale ) * ( absxi/scale ); + } + + } + i += inc_x; } - + } return(scale * sqrt(ssq)); } diff --git a/kernel/riscv64/omatcopy_cn_rvv.c b/kernel/riscv64/omatcopy_cn_rvv.c new file mode 100644 index 0000000000..8cd1fb545e --- /dev/null +++ b/kernel/riscv64/omatcopy_cn_rvv.c @@ -0,0 +1,109 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "common.h" +#include + +#if !defined(DOUBLE) +#define VSETVL_MAX __riscv_vsetvlmax_e32m8() +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define FLOAT_V_T vfloat32m8_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VSEV_FLOAT __riscv_vse32_v_f32m8 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8 +#else +#define VSETVL_MAX __riscv_vsetvlmax_e64m8() +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define FLOAT_V_T vfloat64m8_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VSEV_FLOAT __riscv_vse64_v_f64m8 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8 +#endif + + +int CNAME(BLASLONG rows, BLASLONG cols, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *b, BLASLONG ldb) +{ + BLASLONG i,j; + FLOAT *aptr,*bptr; + size_t vl; + + FLOAT_V_T va, vb; + if ( rows <= 0 ) return(0); + if ( cols <= 0 ) return(0); + + aptr = a; + bptr = b; + + if ( alpha == 0.0 ) + { + vl = VSETVL_MAX; + va = VFMVVF_FLOAT(0, vl); + for ( i=0; i 0)) goto L70; + + nsteps = n * incx; + if (dflag < 0.) 
{ + goto L50; + } else if (dflag == 0) { + goto L10; + } else { + goto L30; + } +L10: + dh12 = dparam[4]; + dh21 = dparam[3]; + i__1 = nsteps; + i__2 = incx; + if(i__2 < 0){ + offset = i__1 - 2; + dx += offset; + dy += offset; + i__1 = -i__1; + i__2 = -i__2; + } + stride = i__2 * sizeof(FLOAT); + n = i__1 / i__2; + for (size_t vl; n > 0; n -= vl, dx += vl*i__2, dy += vl*i__2) { + vl = VSETVL(n); + + v_w = VLSEV_FLOAT(&dx[1], stride, vl); + v_z__ = VLSEV_FLOAT(&dy[1], stride, vl); + + v_dx = VFMACCVF_FLOAT(v_w, dh12, v_z__, vl); + v_dy = VFMACCVF_FLOAT(v_z__, dh21, v_w, vl); + + VSSEV_FLOAT(&dx[1], stride, v_dx, vl); + VSSEV_FLOAT(&dy[1], stride, v_dy, vl); + } + goto L140; +L30: + dh11 = dparam[2]; + dh22 = dparam[5]; + i__2 = nsteps; + i__1 = incx; + if(i__1 < 0){ + offset = i__2 - 2; + dx += offset; + dy += offset; + i__1 = -i__1; + i__2 = -i__2; + } + stride = i__1 * sizeof(FLOAT); + n = i__2 / i__1; + for (size_t vl; n > 0; n -= vl, dx += vl*i__1, dy += vl*i__1) { + vl = VSETVL(n); + + v_w = VLSEV_FLOAT(&dx[1], stride, vl); + v_z__ = VLSEV_FLOAT(&dy[1], stride, vl); + + v_dx = VFMACCVF_FLOAT(v_z__, dh11, v_w, vl); + v_dy = VFMSACVF_FLOAT(v_w, dh22, v_z__, vl); + + VSSEV_FLOAT(&dx[1], stride, v_dx, vl); + VSSEV_FLOAT(&dy[1], stride, v_dy, vl); + } + goto L140; +L50: + dh11 = dparam[2]; + dh12 = dparam[4]; + dh21 = dparam[3]; + dh22 = dparam[5]; + i__1 = nsteps; + i__2 = incx; + if(i__2 < 0){ + offset = i__1 - 2; + dx += offset; + dy += offset; + i__1 = -i__1; + i__2 = -i__2; + } + stride = i__2 * sizeof(FLOAT); + n = i__1 / i__2; + for (size_t vl; n > 0; n -= vl, dx += vl*i__2, dy += vl*i__2) { + vl = VSETVL(n); + + v_w = VLSEV_FLOAT(&dx[1], stride, vl); + v_z__ = VLSEV_FLOAT(&dy[1], stride, vl); + + v_dx = VFMULVF_FLOAT(v_w, dh11, vl); + v_dx = VFMACCVF_FLOAT(v_dx, dh12, v_z__, vl); + VSSEV_FLOAT(&dx[1], stride, v_dx, vl); + + v_dy = VFMULVF_FLOAT(v_w, dh21, vl); + v_dy = VFMACCVF_FLOAT(v_dy, dh22, v_z__, vl); + VSSEV_FLOAT(&dy[1], stride, v_dy, vl); + } + goto 
L140; +L70: + kx = 1; + ky = 1; + if (incx < 0) { + kx = (1 - n) * incx + 1; + } + if (incy < 0) { + ky = (1 - n) * incy + 1; + } + + if (dflag < 0.) { + goto L120; + } else if (dflag == 0) { + goto L80; + } else { + goto L100; + } +L80: + dh12 = dparam[4]; + dh21 = dparam[3]; + if(incx < 0){ + incx = -incx; + dx -= n*incx; + } + if(incy < 0){ + incy = -incy; + dy -= n*incy; + } + stride_x = incx * sizeof(FLOAT); + stride_y = incy * sizeof(FLOAT); + for (size_t vl; n > 0; n -= vl, dx += vl*incx, dy += vl*incy) { + vl = VSETVL(n); + + v_w = VLSEV_FLOAT(&dx[kx], stride_x, vl); + v_z__ = VLSEV_FLOAT(&dy[ky], stride_y, vl); + + v_dx = VFMACCVF_FLOAT(v_w, dh12, v_z__, vl); + v_dy = VFMACCVF_FLOAT(v_z__, dh21, v_w, vl); + + VSSEV_FLOAT(&dx[kx], stride_x, v_dx, vl); + VSSEV_FLOAT(&dy[ky], stride_y, v_dy, vl); + } + goto L140; +L100: + dh11 = dparam[2]; + dh22 = dparam[5]; + if(incx < 0){ + incx = -incx; + dx -= n*incx; + } + if(incy < 0){ + incy = -incy; + dy -= n*incy; + } + stride_x = incx * sizeof(FLOAT); + stride_y = incy * sizeof(FLOAT); + for (size_t vl; n > 0; n -= vl, dx += vl*incx, dy += vl*incy) { + vl = VSETVL(n); + + v_w = VLSEV_FLOAT(&dx[kx], stride_x, vl); + v_z__ = VLSEV_FLOAT(&dy[ky], stride_y, vl); + + v_dx = VFMACCVF_FLOAT(v_z__, dh11, v_w, vl); + v_dy = VFMSACVF_FLOAT(v_w, dh22, v_z__, vl); + + VSSEV_FLOAT(&dx[kx], stride_x, v_dx, vl); + VSSEV_FLOAT(&dy[ky], stride_y, v_dy, vl); + } + goto L140; +L120: + dh11 = dparam[2]; + dh12 = dparam[4]; + dh21 = dparam[3]; + dh22 = dparam[5]; + if(incx < 0){ + incx = -incx; + dx -= n*incx; + } + if(incy < 0){ + incy = -incy; + dy -= n*incy; + } + stride_x = incx * sizeof(FLOAT); + stride_y = incy * sizeof(FLOAT); + for (size_t vl; n > 0; n -= vl, dx += vl*incx, dy += vl*incy) { + vl = VSETVL(n); + + v_w = VLSEV_FLOAT(&dx[kx], stride_x, vl); + v_z__ = VLSEV_FLOAT(&dy[ky], stride_y, vl); + + v_dx = VFMULVF_FLOAT(v_w, dh11, vl); + v_dx = VFMACCVF_FLOAT(v_dx, dh12, v_z__, vl); + VSSEV_FLOAT(&dx[kx], stride_x, v_dx, 
vl); + + v_dy = VFMULVF_FLOAT(v_w, dh21, vl); + v_dy = VFMACCVF_FLOAT(v_dy, dh22, v_z__, vl); + VSSEV_FLOAT(&dy[ky], stride_y, v_dy, vl); + } +L140: + return(0); +} diff --git a/kernel/riscv64/symm_lcopy_rvv_v1.c b/kernel/riscv64/symm_lcopy_rvv_v1.c index a615db44d9..2e5bfc6caf 100644 --- a/kernel/riscv64/symm_lcopy_rvv_v1.c +++ b/kernel/riscv64/symm_lcopy_rvv_v1.c @@ -35,11 +35,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define VSEV_FLOAT __riscv_vse32_v_f32m2 #define VLSEV_FLOAT __riscv_vlse32_v_f32m2 #define INT_V_T vint32m2_t -#define VID_V_INT __riscv_vid_v_i32m2 +#define VID_V_INT __riscv_vid_v_u32m2 #define VADD_VX_INT __riscv_vadd_vx_i32m2 #define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16 #define VBOOL_T vbool16_t #define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u32m2_i32m2 #else #define VSETVL(n) __riscv_vsetvl_e64m2(n) #define VSETVL_MAX __riscv_vsetvlmax_e64m2() @@ -48,11 +49,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define VSEV_FLOAT __riscv_vse64_v_f64m2 #define VLSEV_FLOAT __riscv_vlse64_v_f64m2 #define INT_V_T vint64m2_t -#define VID_V_INT __riscv_vid_v_i64m2 +#define VID_V_INT __riscv_vid_v_u64m2 #define VADD_VX_INT __riscv_vadd_vx_i64m2 #define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32 #define VBOOL_T vbool32_t #define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u64m2_i64m2 #endif // Optimizes the implementation in ../generic/symm_lcopy_4.c @@ -70,7 +72,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON INT_V_T vindex_max, vindex; size_t vl = VSETVL_MAX; - vindex_max = VID_V_INT(vl); + vindex_max = V_UM2_TO_IM2(VID_V_INT(vl)); for (js = n; js > 0; js -= vl, posX += vl) { vl = VSETVL(js); @@ -98,4 +100,3 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON return 0; } - diff --git a/kernel/riscv64/symm_ucopy_rvv_v1.c b/kernel/riscv64/symm_ucopy_rvv_v1.c index 464f97b3a6..faab88a678 100644 --- a/kernel/riscv64/symm_ucopy_rvv_v1.c +++ b/kernel/riscv64/symm_ucopy_rvv_v1.c @@ -35,11 +35,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define VSEV_FLOAT __riscv_vse32_v_f32m2 #define VLSEV_FLOAT __riscv_vlse32_v_f32m2 #define INT_V_T vint32m2_t -#define VID_V_INT __riscv_vid_v_i32m2 +#define VID_V_INT __riscv_vid_v_u32m2 #define VADD_VX_INT __riscv_vadd_vx_i32m2 #define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16 #define VBOOL_T vbool16_t #define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u32m2_i32m2 #else #define VSETVL(n) __riscv_vsetvl_e64m2(n) #define VSETVL_MAX __riscv_vsetvlmax_e64m2() @@ -48,11 +49,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define VSEV_FLOAT __riscv_vse64_v_f64m2 #define VLSEV_FLOAT __riscv_vlse64_v_f64m2 #define INT_V_T vint64m2_t -#define VID_V_INT __riscv_vid_v_i64m2 +#define VID_V_INT __riscv_vid_v_u64m2 #define VADD_VX_INT __riscv_vadd_vx_i64m2 #define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32 #define VBOOL_T vbool32_t #define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u64m2_i64m2 #endif // Optimizes the implementation in ../generic/symm_ucopy_4.c @@ -70,7 +72,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON INT_V_T vindex_max, vindex; size_t vl = VSETVL_MAX; - vindex_max = VID_V_INT(vl); + vindex_max = V_UM2_TO_IM2(VID_V_INT(vl)); for (js = n; js > 0; js -= vl, posX += vl) { vl = VSETVL(js); @@ -97,4 +99,4 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON } return 0; -} +} \ No newline at end of file diff --git a/kernel/riscv64/zgemm_tcopy_4_rvv.c b/kernel/riscv64/zgemm_tcopy_4_rvv.c index cfafbf0dc7..9c194877a2 100644 --- a/kernel/riscv64/zgemm_tcopy_4_rvv.c +++ b/kernel/riscv64/zgemm_tcopy_4_rvv.c @@ -28,35 +28,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) __riscv_vsetvl_e32m1(n) -#define FLOAT_V_T vfloat32m1_t -#define FLOAT_VX2_T vfloat32m1x2_t -#define FLOAT_VX4_T vfloat32m1x4_t -#define FLOAT_VX8_T vfloat32m1x8_t -#define VLEV_FLOAT __riscv_vle32_v_f32m1 -#define VSEV_FLOAT __riscv_vse32_v_f32m1 -#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m1x2 -#define VLSSEG4_FLOAT __riscv_vlsseg4e32_v_f32m1x4 -#define VLSSEG8_FLOAT __riscv_vlsseg8e32_v_f32m1x8 -#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1x2 -#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1x4 -#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1x8 +#define FLOAT_V_T vfloat32m2_t +#define FLOAT_V_T_HALF vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VLEV_FLOAT_HALF __riscv_vle32_v_f32m1 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VSEV_FLOAT_HALF __riscv_vse32_v_f32m1 #else -#define VSETVL(n) __riscv_vsetvl_e64m1(n) -#define FLOAT_V_T vfloat64m1_t -#define FLOAT_VX2_T vfloat64m1x2_t -#define FLOAT_VX4_T vfloat64m1x4_t -#define FLOAT_VX8_T vfloat64m1x8_t -#define VLEV_FLOAT __riscv_vle64_v_f64m1 -#define VSEV_FLOAT __riscv_vse64_v_f64m1 -#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m1x2 -#define VLSSEG4_FLOAT __riscv_vlsseg4e64_v_f64m1x4 -#define VLSSEG8_FLOAT __riscv_vlsseg8e64_v_f64m1x8 -#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1x2 -#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1x4 -#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1x8 +#define FLOAT_V_T vfloat64m4_t +#define FLOAT_V_T_HALF vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m4 +#define VLEV_FLOAT_HALF __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m4 +#define VSEV_FLOAT_HALF __riscv_vse64_v_f64m2 #endif + int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ BLASLONG i, j; @@ -67,9 +54,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ IFLOAT *boffset, *boffset1, *boffset2, *boffset3; FLOAT_V_T v0; - FLOAT_VX2_T vx2; - FLOAT_VX4_T vx4; - FLOAT_VX8_T vx8; + 
FLOAT_V_T_HALF v1; size_t vl; @@ -80,86 +65,12 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ boffset2 = b + 2 * m * (n & ~3); boffset3 = b + 2 * m * (n & ~1); - for(j = (m >> 2); j > 0; j--) { - - aoffset1 = aoffset; - aoffset += 8 * lda; - - boffset1 = boffset; - boffset += 32; - - for(i = (n >> 2); i > 0; i--) { - vl = 4; - - vx8 = VLSSEG8_FLOAT(aoffset1, lda * sizeof(FLOAT) * 2, vl); - VSSEG8_FLOAT(boffset1, vx8, vl); - - aoffset1 += 8; - boffset1 += m * 8; - } - - if (n & 2) { - vl = 4; - - vx4 = VLSSEG4_FLOAT(aoffset1, lda * sizeof(FLOAT) * 2, vl); - VSSEG4_FLOAT(boffset2, vx4, vl); - - aoffset1 += 4; - boffset2 += 16; - } - - if (n & 1) { - vl = 4; - - vx2 = VLSSEG2_FLOAT(aoffset1, lda * sizeof(FLOAT) * 2, vl); - VSSEG2_FLOAT(boffset3, vx2, vl); - - aoffset1 += 2; - boffset3 += 8; - } - } - - if (m & 2) { + for(j = m; j > 0; j--) { aoffset1 = aoffset; - aoffset += 4 * lda; - boffset1 = boffset; - boffset += 16; - - for(i = (n >> 2); i > 0; i--) { - vl = 2; - - vx8 = VLSSEG8_FLOAT(aoffset1, lda * sizeof(FLOAT) * 2, vl); - VSSEG8_FLOAT(boffset1, vx8, vl); - - aoffset1 += 8; - boffset1 += m * 8; - } - - if (n & 2) { - vl = 2; - - vx4 = VLSSEG4_FLOAT(aoffset1, lda * sizeof(FLOAT) * 2, vl); - VSSEG4_FLOAT(boffset2, vx4, vl); - - aoffset1 += 4; - boffset2 += 8; - } - - if (n & 1) { - vl = 2; - vx2 = VLSSEG2_FLOAT(aoffset1, lda * sizeof(FLOAT) * 2, vl); - VSSEG2_FLOAT(boffset3, vx2, vl); - - //aoffset1 += 2; - boffset3 += 4; - } - } - - if (m & 1) { - aoffset1 = aoffset; - boffset1 = boffset; + aoffset += 2 * lda; + boffset += 8; for(i = (n >> 2); i > 0; i--) { vl = 8; @@ -174,16 +85,19 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ if (n & 2) { vl = 4; - v0 = VLEV_FLOAT(aoffset1, vl); - VSEV_FLOAT(boffset2, v0, vl); + v1 = VLEV_FLOAT_HALF(aoffset1, vl); + VSEV_FLOAT_HALF(boffset2, v1, vl); aoffset1 += 4; - //boffset2 += 4; + boffset2 += 4; } if (n & 1) { - *(boffset3) = *(aoffset1); - *(boffset3 + 1) = *(aoffset1 + 
1); + *(boffset3) = *(aoffset1); + *(boffset3 + 1) = *(aoffset1 + 1); + + aoffset1 += 2; + boffset3 += 2; } } diff --git a/kernel/riscv64/zhemm_ltcopy_rvv_v1.c b/kernel/riscv64/zhemm_ltcopy_rvv_v1.c index 97013895ae..15dfc229d8 100644 --- a/kernel/riscv64/zhemm_ltcopy_rvv_v1.c +++ b/kernel/riscv64/zhemm_ltcopy_rvv_v1.c @@ -41,7 +41,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2x2 #define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2x2 #define INT_V_T vint32m2_t -#define VID_V_INT __riscv_vid_v_i32m2 +#define VID_V_INT __riscv_vid_v_u32m2 #define VADD_VX_INT __riscv_vadd_vx_i32m2 #define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f32m2 #define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16 @@ -50,6 +50,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define VBOOL_T vbool16_t #define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u32m2_i32m2 #else #define VSETVL(n) __riscv_vsetvl_e64m2(n) #define VSETVL_MAX __riscv_vsetvlmax_e64m2() @@ -64,7 +65,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2x2 #define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2x2 #define INT_V_T vint64m2_t -#define VID_V_INT __riscv_vid_v_i64m2 +#define VID_V_INT __riscv_vid_v_u64m2 #define VADD_VX_INT __riscv_vadd_vx_i64m2 #define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f64m2 #define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32 @@ -73,6 +74,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define VBOOL_T vbool32_t #define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u64m2_i64m2 #endif @@ -92,7 +94,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON INT_V_T vindex_max, vindex; size_t vl = VSETVL_MAX; - vindex_max = VID_V_INT(vl); + vindex_max = V_UM2_TO_IM2(VID_V_INT(vl)); vzero = VFMVVF_FLOAT(ZERO, vl); for (js = n; js > 0; js -= vl, posX += vl) { @@ -136,4 +138,3 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON return 0; } - diff --git a/kernel/riscv64/zhemm_utcopy_rvv_v1.c b/kernel/riscv64/zhemm_utcopy_rvv_v1.c index 59029e9e59..cc7c44e12c 100644 --- a/kernel/riscv64/zhemm_utcopy_rvv_v1.c +++ b/kernel/riscv64/zhemm_utcopy_rvv_v1.c @@ -41,7 +41,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2x2 #define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2x2 #define INT_V_T vint32m2_t -#define VID_V_INT __riscv_vid_v_i32m2 +#define VID_V_INT __riscv_vid_v_u32m2 #define VADD_VX_INT __riscv_vadd_vx_i32m2 #define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f32m2 #define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16 @@ -50,6 +50,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define VBOOL_T vbool16_t #define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u32m2_i32m2 #else #define VSETVL(n) __riscv_vsetvl_e64m2(n) #define VSETVL_MAX __riscv_vsetvlmax_e64m2() @@ -64,7 +65,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2x2 #define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2x2 #define INT_V_T vint64m2_t -#define VID_V_INT __riscv_vid_v_i64m2 +#define VID_V_INT __riscv_vid_v_u64m2 #define VADD_VX_INT __riscv_vadd_vx_i64m2 #define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f64m2 #define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32 @@ -73,6 +74,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define VBOOL_T vbool32_t #define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u64m2_i64m2 #endif @@ -90,7 +92,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON INT_V_T vindex_max, vindex; size_t vl = VSETVL_MAX; - vindex_max = VID_V_INT(vl); + vindex_max = V_UM2_TO_IM2(VID_V_INT(vl)); vzero = VFMVVF_FLOAT(ZERO, vl); for (js = n; js > 0; js -= vl, posX += vl) { @@ -132,4 +134,4 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON } return 0; -} +} \ No newline at end of file diff --git a/kernel/riscv64/zomatcopy_cn_rvv.c b/kernel/riscv64/zomatcopy_cn_rvv.c new file mode 100644 index 0000000000..1edf9551ae --- /dev/null +++ b/kernel/riscv64/zomatcopy_cn_rvv.c @@ -0,0 +1,100 @@ +/*************************************************************************** +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. 
Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" +#include + +#if defined(DOUBLE) +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m4x2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m4x2 +#define VSETVL __riscv_vsetvl_e64m4 +#define FLOAT_VX2_T vfloat64m4x2_t +#define VGET_VX2 __riscv_vget_v_f64m4x2_f64m4 +#define VSET_VX2 __riscv_vset_v_f64m4_f64m4x2 +#define FLOAT_V vfloat64m4_t +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4 +#else +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m4x2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m4x2 +#define VSETVL __riscv_vsetvl_e32m4 +#define FLOAT_VX2_T vfloat32m4x2_t +#define VGET_VX2 __riscv_vget_v_f32m4x2_f32m4 +#define VSET_VX2 __riscv_vset_v_f32m4_f32m4x2 +#define FLOAT_V vfloat32m4_t +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4 +#endif + +int CNAME(BLASLONG rows, BLASLONG cols, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *b, 
BLASLONG ldb) +{ + BLASLONG i,j,ia; + FLOAT *aptr,*bptr; + size_t vl; + FLOAT_VX2_T va, vb; + FLOAT_V va0, va1, vb0, vb1, vtemp; + + if ( rows <= 0 ) return(0); + if ( cols <= 0 ) return(0); + + aptr = a; + bptr = b; + + lda *= 2; + ldb *= 2; + + for ( i=0; i 0; js -= vl, posX += vl) { vl = VSETVL(js); @@ -118,4 +120,3 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON return 0; } - diff --git a/kernel/riscv64/zsymm_ucopy_rvv_v1.c b/kernel/riscv64/zsymm_ucopy_rvv_v1.c index 069551bb0e..5f3ac3d07d 100644 --- a/kernel/riscv64/zsymm_ucopy_rvv_v1.c +++ b/kernel/riscv64/zsymm_ucopy_rvv_v1.c @@ -41,11 +41,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2x2 #define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2x2 #define INT_V_T vint32m2_t -#define VID_V_INT __riscv_vid_v_i32m2 +#define VID_V_INT __riscv_vid_v_u32m2 #define VADD_VX_INT __riscv_vadd_vx_i32m2 #define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16 #define VBOOL_T vbool16_t #define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u32m2_i32m2 #else #define VSETVL(n) __riscv_vsetvl_e64m2(n) #define VSETVL_MAX __riscv_vsetvlmax_e64m2() @@ -60,11 +61,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2x2 #define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2x2 #define INT_V_T vint64m2_t -#define VID_V_INT __riscv_vid_v_i64m2 +#define VID_V_INT __riscv_vid_v_u64m2 #define VADD_VX_INT __riscv_vadd_vx_i64m2 #define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32 #define VBOOL_T vbool32_t #define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u64m2_i64m2 #endif @@ -83,7 +85,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON size_t vl = VSETVL_MAX; - vindex_max = VID_V_INT(vl); + vindex_max = V_UM2_TO_IM2(VID_V_INT(vl)); for (js = n; js > 0; js -= vl, posX += vl) { vl = VSETVL(js); @@ -118,4 +120,4 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON } return 0; -} +} \ No newline at end of file diff --git a/kernel/riscv64/ztrmm_lncopy_rvv_v1.c b/kernel/riscv64/ztrmm_lncopy_rvv_v1.c index ae664561b4..9264f13781 100644 --- a/kernel/riscv64/ztrmm_lncopy_rvv_v1.c +++ b/kernel/riscv64/ztrmm_lncopy_rvv_v1.c @@ -42,10 +42,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2x2 #define VBOOL_T vbool16_t #define UINT_V_T vint32m2_t -#define VID_V_UINT __riscv_vid_v_i32m2 +#define VID_V_UINT __riscv_vid_v_u32m2 #define VMSGTU_VX_UINT __riscv_vmsgt_vx_i32m2_b16 #define VMSEQ_VX_UINT __riscv_vmseq_vx_i32m2_b16 #define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2 +#define V_UM2_TO_IM2 __riscv_vreinterpret_v_u32m2_i32m2 #else #define VSETVL(n) __riscv_vsetvl_e64m2(n) #define FLOAT_V_T vfloat64m2_t @@ -63,6 +64,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32 #define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32 #define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2 +#define V_UM2_TO_IM2(values) values #endif int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){ @@ -99,7 +101,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON } i = 0; - do + do { if (X > posY) { @@ -119,9 +121,9 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON X ++; i ++; } - else + else { - vindex = VID_V_UINT(vl); + vindex = V_UM2_TO_IM2(VID_V_UINT(vl)); for (unsigned int j = 0; j < vl; j++) { vax2 = VLSSEG2_FLOAT(ao, stride_lda, vl); @@ -152,4 +154,4 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON } return 0; -} +} \ No newline at end of file diff --git a/kernel/setparam-ref.c b/kernel/setparam-ref.c index dece71a66a..24c2855579 100644 --- a/kernel/setparam-ref.c +++ b/kernel/setparam-ref.c @@ -72,9 +72,9 @@ gotoblas_t TABLE_NAME = { samax_kTS, samin_kTS, smax_kTS, smin_kTS, isamax_kTS, isamin_kTS, ismax_kTS, ismin_kTS, - snrm2_kTS, sasum_kTS, ssum_kTS, scopy_kTS, sbdot_kTS, + snrm2_kTS, sasum_kTS, ssum_kTS, scopy_kTS, sbdot_kTS, dsdot_kTS, - srot_kTS, saxpy_kTS, sscal_kTS, sswap_kTS, + srot_kTS, srotm_kTS, saxpy_kTS, sscal_kTS, sswap_kTS, sbgemv_nTS, sbgemv_tTS, sger_kTS, ssymv_LTS, ssymv_UTS, @@ -158,7 +158,7 @@ gotoblas_t TABLE_NAME = { #if (BUILD_SINGLE==1) || (BUILD_DOUBLE==1) || (BUILD_COMPLEX==1) scopy_kTS, sdot_kTS, // dsdot_kTS, - srot_kTS, saxpy_kTS, + srot_kTS, srotm_kTS, saxpy_kTS, #endif #if (BUILD_SINGLE==1) || (BUILD_DOUBLE==1) || (BUILD_COMPLEX==1) || (BUILD_COMPLEX16==1) sscal_kTS, @@ -265,6 +265,7 @@ gotoblas_t TABLE_NAME = { #endif #if (BUILD_DOUBLE==1) || (BUILD_COMPLEX16==1) drot_kTS, + drotm_kTS, daxpy_kTS, dscal_kTS, dswap_kTS, @@ -336,10 +337,9 @@ gotoblas_t TABLE_NAME = { qamax_kTS, qamin_kTS, qmax_kTS, qmin_kTS, iqamax_kTS, 
iqamin_kTS, iqmax_kTS, iqmin_kTS, qnrm2_kTS, qasum_kTS, qsum_kTS, qcopy_kTS, qdot_kTS, - qrot_kTS, qaxpy_kTS, qscal_kTS, qswap_kTS, + qrot_kTS, qrotm_kTS, qaxpy_kTS, qscal_kTS, qswap_kTS, qgemv_nTS, qgemv_tTS, qger_kTS, qsymv_LTS, qsymv_UTS, - qgemm_kernelTS, qgemm_betaTS, #if QGEMM_DEFAULT_UNROLL_M != QGEMM_DEFAULT_UNROLL_N qgemm_incopyTS, qgemm_itcopyTS, diff --git a/kernel/sparc/KERNEL b/kernel/sparc/KERNEL index a8c958bb45..d6580609bd 100644 --- a/kernel/sparc/KERNEL +++ b/kernel/sparc/KERNEL @@ -75,3 +75,14 @@ DGEMM_BETA = ../generic/gemm_beta.c CGEMM_BETA = ../generic/zgemm_beta.c ZGEMM_BETA = ../generic/zgemm_beta.c +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/x86/KERNEL b/kernel/x86/KERNEL index 83b51db13f..1095c15286 100644 --- a/kernel/x86/KERNEL +++ b/kernel/x86/KERNEL @@ -189,3 +189,14 @@ ZGEMM_BETA = ../generic/zgemm_beta.c QGEMM_BETA = ../generic/gemm_beta.c XGEMM_BETA = ../generic/zgemm_beta.c +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/x86/KERNEL.generic b/kernel/x86/KERNEL.generic index 0aac0ce996..ada3ff42d4 100644 --- a/kernel/x86/KERNEL.generic +++ b/kernel/x86/KERNEL.generic @@ -162,3 +162,15 @@ ZHEMV_L_KERNEL = ../generic/zhemv_k.c CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/x86_64/KERNEL b/kernel/x86_64/KERNEL index 2deb5a864c..c270ff0771 100644 --- a/kernel/x86_64/KERNEL +++ b/kernel/x86_64/KERNEL @@ -290,6 +290,18 @@ ifndef QROTKERNEL QROTKERNEL = rot.S endif +ifndef 
SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif + ifndef CROTKERNEL CROTKERNEL = zrot_sse.S endif diff --git a/kernel/x86_64/KERNEL.generic b/kernel/x86_64/KERNEL.generic index 7cb0cb836c..36dc9f43d2 100644 --- a/kernel/x86_64/KERNEL.generic +++ b/kernel/x86_64/KERNEL.generic @@ -168,3 +168,15 @@ QCABS_KERNEL = ../generic/cabs.c #Dump kernel CGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c ZGEMM3MKERNEL = ../generic/zgemm3mkernel_dump.c + +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif + +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/zarch/KERNEL b/kernel/zarch/KERNEL index 68d68b5f86..cd94052035 100644 --- a/kernel/zarch/KERNEL +++ b/kernel/zarch/KERNEL @@ -27,4 +27,14 @@ ifndef ZGEMM_BETA ZGEMM_BETA = ../generic/zgemm_beta.c endif +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif + +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/kernel/zarch/KERNEL.ZARCH_GENERIC b/kernel/zarch/KERNEL.ZARCH_GENERIC index 33850d0f7d..6321cf6e39 100644 --- a/kernel/zarch/KERNEL.ZARCH_GENERIC +++ b/kernel/zarch/KERNEL.ZARCH_GENERIC @@ -135,5 +135,14 @@ ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +ifndef SROTMKERNEL +SROTMKERNEL = ../generic/rotm.c +endif +ifndef DROTMKERNEL +DROTMKERNEL = ../generic/rotm.c +endif +ifndef QROTMKERNEL +QROTMKERNEL = ../generic/rotm.c +endif diff --git a/lapack-netlib/LAPACKE/include/lapacke_config.h b/lapack-netlib/LAPACKE/include/lapacke_config.h index 798a5eb2ef..4ef542fb10 100644 --- a/lapack-netlib/LAPACKE/include/lapacke_config.h +++ b/lapack-netlib/LAPACKE/include/lapacke_config.h @@ -67,8 +67,14 @@ extern "C" { #define lapack_logical 
lapack_int #endif +#if defined(_MSC_VER) && defined(__INTEL_CLANG_COMPILER) +#define LAPACK_COMPLEX_STRUCTURE +#define LAPACK_GLOBAL(lcname,UCNAME) lcname +#define NOCHANGE +#endif + #ifndef LAPACK_COMPLEX_CUSTOM -#if defined(_MSC_VER) +#if defined(_MSC_VER) && !defined(__INTEL_CLANG_COMPILER) #define _CRT_USE_C_COMPLEX_H #include #define LAPACK_COMPLEX_CUSTOM diff --git a/lapack-netlib/LAPACKE/src/lapacke_cgesvd_work.c b/lapack-netlib/LAPACKE/src/lapacke_cgesvd_work.c index 6bc69d48f0..a41819ccc1 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cgesvd_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cgesvd_work.c @@ -74,11 +74,13 @@ lapack_int LAPACKE_cgesvd_work( int matrix_layout, char jobu, char jobvt, LAPACKE_xerbla( "LAPACKE_cgesvd_work", info ); return info; } - if( ldvt < ncols_vt ) { + if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) { + if( ldvt < ncols_vt ) { info = -12; LAPACKE_xerbla( "LAPACKE_cgesvd_work", info ); return info; } + } /* Query optimal working array(s) size if requested */ if( lwork == -1 ) { LAPACK_cgesvd( &jobu, &jobvt, &m, &n, a, &lda_t, s, u, &ldu_t, vt, diff --git a/lapack-netlib/LAPACKE/src/lapacke_ctpmqrt_work.c b/lapack-netlib/LAPACKE/src/lapacke_ctpmqrt_work.c index e01664bdf8..cb80787a85 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ctpmqrt_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ctpmqrt_work.c @@ -51,8 +51,8 @@ lapack_int LAPACKE_ctpmqrt_work( int matrix_layout, char side, char trans, } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int nrowsA, ncolsA, nrowsV; - if ( side == LAPACKE_lsame(side, 'l') ) { nrowsA = k; ncolsA = n; nrowsV = m; } - else if ( side == LAPACKE_lsame(side, 'r') ) { nrowsA = m; ncolsA = k; nrowsV = n; } + if ( LAPACKE_lsame(side, 'l') ) { nrowsA = k; ncolsA = n; nrowsV = m; } + else if ( LAPACKE_lsame(side, 'r') ) { nrowsA = m; ncolsA = k; nrowsV = n; } else { info = -2; LAPACKE_xerbla( "LAPACKE_ctpmqrt_work", info ); diff --git 
a/lapack-netlib/LAPACKE/src/lapacke_dgesvd_work.c b/lapack-netlib/LAPACKE/src/lapacke_dgesvd_work.c index 6668dd7484..d79583b539 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dgesvd_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dgesvd_work.c @@ -72,11 +72,13 @@ lapack_int LAPACKE_dgesvd_work( int matrix_layout, char jobu, char jobvt, LAPACKE_xerbla( "LAPACKE_dgesvd_work", info ); return info; } + if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) { if( ldvt < ncols_vt ) { info = -12; LAPACKE_xerbla( "LAPACKE_dgesvd_work", info ); return info; } + } /* Query optimal working array(s) size if requested */ if( lwork == -1 ) { LAPACK_dgesvd( &jobu, &jobvt, &m, &n, a, &lda_t, s, u, &ldu_t, vt, diff --git a/lapack-netlib/LAPACKE/src/lapacke_dtpmqrt_work.c b/lapack-netlib/LAPACKE/src/lapacke_dtpmqrt_work.c index 366acd3690..1278a8128d 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dtpmqrt_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dtpmqrt_work.c @@ -49,8 +49,8 @@ lapack_int LAPACKE_dtpmqrt_work( int matrix_layout, char side, char trans, } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int nrowsA, ncolsA, nrowsV; - if ( side == LAPACKE_lsame(side, 'l') ) { nrowsA = k; ncolsA = n; nrowsV = m; } - else if ( side == LAPACKE_lsame(side, 'r') ) { nrowsA = m; ncolsA = k; nrowsV = n; } + if ( LAPACKE_lsame(side, 'l') ) { nrowsA = k; ncolsA = n; nrowsV = m; } + else if ( LAPACKE_lsame(side, 'r') ) { nrowsA = m; ncolsA = k; nrowsV = n; } else { info = -2; LAPACKE_xerbla( "LAPACKE_dtpmqrt_work", info ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_sgesvd_work.c b/lapack-netlib/LAPACKE/src/lapacke_sgesvd_work.c index c764333ed3..c7561db0b5 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sgesvd_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sgesvd_work.c @@ -72,11 +72,13 @@ lapack_int LAPACKE_sgesvd_work( int matrix_layout, char jobu, char jobvt, LAPACKE_xerbla( "LAPACKE_sgesvd_work", info ); return info; } - if( ldvt < ncols_vt ) { + if( LAPACKE_lsame( 
jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) { + if( ldvt < ncols_vt ) { info = -12; LAPACKE_xerbla( "LAPACKE_sgesvd_work", info ); return info; } + } /* Query optimal working array(s) size if requested */ if( lwork == -1 ) { LAPACK_sgesvd( &jobu, &jobvt, &m, &n, a, &lda_t, s, u, &ldu_t, vt, diff --git a/lapack-netlib/LAPACKE/src/lapacke_stpmqrt_work.c b/lapack-netlib/LAPACKE/src/lapacke_stpmqrt_work.c index c5a3a14965..d055223f5b 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_stpmqrt_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_stpmqrt_work.c @@ -49,8 +49,8 @@ lapack_int LAPACKE_stpmqrt_work( int matrix_layout, char side, char trans, } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int nrowsA, ncolsA, nrowsV; - if ( side == LAPACKE_lsame(side, 'l') ) { nrowsA = k; ncolsA = n; nrowsV = m; } - else if ( side == LAPACKE_lsame(side, 'r') ) { nrowsA = m; ncolsA = k; nrowsV = n; } + if ( LAPACKE_lsame(side, 'l') ) { nrowsA = k; ncolsA = n; nrowsV = m; } + else if ( LAPACKE_lsame(side, 'r') ) { nrowsA = m; ncolsA = k; nrowsV = n; } else { info = -2; LAPACKE_xerbla( "LAPACKE_stpmqrt_work", info ); diff --git a/lapack-netlib/LAPACKE/src/lapacke_zgesvd_work.c b/lapack-netlib/LAPACKE/src/lapacke_zgesvd_work.c index ba48bb052f..07e228c803 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zgesvd_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zgesvd_work.c @@ -74,11 +74,13 @@ lapack_int LAPACKE_zgesvd_work( int matrix_layout, char jobu, char jobvt, LAPACKE_xerbla( "LAPACKE_zgesvd_work", info ); return info; } - if( ldvt < ncols_vt ) { + if( LAPACKE_lsame( jobvt, 'a' ) || LAPACKE_lsame( jobvt, 's' ) ) { + if( ldvt < ncols_vt ) { info = -12; LAPACKE_xerbla( "LAPACKE_zgesvd_work", info ); return info; } + } /* Query optimal working array(s) size if requested */ if( lwork == -1 ) { LAPACK_zgesvd( &jobu, &jobvt, &m, &n, a, &lda_t, s, u, &ldu_t, vt, diff --git a/lapack-netlib/LAPACKE/src/lapacke_ztpmqrt_work.c b/lapack-netlib/LAPACKE/src/lapacke_ztpmqrt_work.c index 
104efa8f3c..0cacc665db 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ztpmqrt_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ztpmqrt_work.c @@ -51,8 +51,8 @@ lapack_int LAPACKE_ztpmqrt_work( int matrix_layout, char side, char trans, } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int nrowsA, ncolsA, nrowsV; - if ( side == LAPACKE_lsame(side, 'l') ) { nrowsA = k; ncolsA = n; nrowsV = m; } - else if ( side == LAPACKE_lsame(side, 'r') ) { nrowsA = m; ncolsA = k; nrowsV = n; } + if ( LAPACKE_lsame(side, 'l') ) { nrowsA = k; ncolsA = n; nrowsV = m; } + else if ( LAPACKE_lsame(side, 'r') ) { nrowsA = m; ncolsA = k; nrowsV = n; } else { info = -2; LAPACKE_xerbla( "LAPACKE_ztpmqrt_work", info ); diff --git a/lapack-netlib/SRC/VARIANTS/Makefile b/lapack-netlib/SRC/VARIANTS/Makefile index 35e50cbc2c..4b0575cc6f 100644 --- a/lapack-netlib/SRC/VARIANTS/Makefile +++ b/lapack-netlib/SRC/VARIANTS/Makefile @@ -30,9 +30,11 @@ LUREC = lu/REC/cgetrf.o lu/REC/dgetrf.o lu/REC/sgetrf.o lu/REC/zgetrf.o QRLL = qr/LL/cgeqrf.o qr/LL/dgeqrf.o qr/LL/sgeqrf.o qr/LL/zgeqrf.o +LARFTL2 = larft/LL-LVL2/clarft.o larft/LL-LVL2/dlarft.o larft/LL-LVL2/slarft.o larft/LL-LVL2/zlarft.o + .PHONY: all -all: cholrl.a choltop.a lucr.a lull.a lurec.a qrll.a +all: cholrl.a choltop.a lucr.a lull.a lurec.a qrll.a larftl2.a cholrl.a: $(CHOLRL) $(AR) $(ARFLAGS) $@ $^ @@ -58,9 +60,13 @@ qrll.a: $(QRLL) $(AR) $(ARFLAGS) $@ $^ $(RANLIB) $@ +larftl2.a: $(LARFTL2) + $(AR) $(ARFLAGS) $@ $^ + $(RANLIB) $@ + .PHONY: clean cleanobj cleanlib clean: cleanobj cleanlib cleanobj: - rm -f $(CHOLRL) $(CHOLTOP) $(LUCR) $(LULL) $(LUREC) $(QRLL) + rm -f $(CHOLRL) $(CHOLTOP) $(LUCR) $(LULL) $(LUREC) $(QRLL) $(LARFTL2) cleanlib: rm -f *.a diff --git a/lapack-netlib/SRC/VARIANTS/README b/lapack-netlib/SRC/VARIANTS/README index ef7626debe..217cfa3e01 100644 --- a/lapack-netlib/SRC/VARIANTS/README +++ b/lapack-netlib/SRC/VARIANTS/README @@ -23,6 +23,7 @@ This directory contains several variants of LAPACK routines in 
single/double/com - [sdcz]geqrf with QR Left Looking Level 3 BLAS version algorithm [2]- Directory: SRC/VARIANTS/qr/LL - [sdcz]potrf with Cholesky Right Looking Level 3 BLAS version algorithm [2]- Directory: SRC/VARIANTS/cholesky/RL - [sdcz]potrf with Cholesky Top Level 3 BLAS version algorithm [2]- Directory: SRC/VARIANTS/cholesky/TOP + - [sdcz]larft using a Left Looking Level 2 BLAS version algorithm - Directory: SRC/VARIANTS/larft/LL-LVL2 References:For a more detailed description please refer to - [1] Toledo, S. 1997. Locality of Reference in LU Decomposition with Partial Pivoting. SIAM J. Matrix Anal. Appl. 18, 4 (Oct. 1997), @@ -44,6 +45,7 @@ Corresponding libraries created in SRC/VARIANTS: - QR Left Looking : qrll.a - Cholesky Right Looking : cholrl.a - Cholesky Top : choltop.a + - LARFT Level 2: larftl2.a =========== diff --git a/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/clarft.f b/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/clarft.f new file mode 100644 index 0000000000..9a7000eff3 --- /dev/null +++ b/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/clarft.f @@ -0,0 +1,328 @@ +*> \brief \b CLARFT VARIANT: left-looking Level 2 BLAS version of the algorithm +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download CLARFT + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* SUBROUTINE CLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* +* .. Scalar Arguments .. +* CHARACTER DIRECT, STOREV +* INTEGER K, LDT, LDV, N +* .. +* .. Array Arguments .. +* COMPLEX T( LDT, * ), TAU( * ), V( LDV, * ) +* .. +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> CLARFT forms the triangular factor T of a complex block reflector H +*> of order n, which is defined as a product of k elementary reflectors. +*> +*> If DIRECT = 'F', H = H(1) H(2) . . . 
H(k) and T is upper triangular; +*> +*> If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. +*> +*> If STOREV = 'C', the vector which defines the elementary reflector +*> H(i) is stored in the i-th column of the array V, and +*> +*> H = I - V * T * V**H +*> +*> If STOREV = 'R', the vector which defines the elementary reflector +*> H(i) is stored in the i-th row of the array V, and +*> +*> H = I - V**H * T * V +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] DIRECT +*> \verbatim +*> DIRECT is CHARACTER*1 +*> Specifies the order in which the elementary reflectors are +*> multiplied to form the block reflector: +*> = 'F': H = H(1) H(2) . . . H(k) (Forward) +*> = 'B': H = H(k) . . . H(2) H(1) (Backward) +*> \endverbatim +*> +*> \param[in] STOREV +*> \verbatim +*> STOREV is CHARACTER*1 +*> Specifies how the vectors which define the elementary +*> reflectors are stored (see also Further Details): +*> = 'C': columnwise +*> = 'R': rowwise +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The order of the block reflector H. N >= 0. +*> \endverbatim +*> +*> \param[in] K +*> \verbatim +*> K is INTEGER +*> The order of the triangular factor T (= the number of +*> elementary reflectors). K >= 1. +*> \endverbatim +*> +*> \param[in] V +*> \verbatim +*> V is COMPLEX array, dimension +*> (LDV,K) if STOREV = 'C' +*> (LDV,N) if STOREV = 'R' +*> The matrix V. See further details. +*> \endverbatim +*> +*> \param[in] LDV +*> \verbatim +*> LDV is INTEGER +*> The leading dimension of the array V. +*> If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. +*> \endverbatim +*> +*> \param[in] TAU +*> \verbatim +*> TAU is COMPLEX array, dimension (K) +*> TAU(i) must contain the scalar factor of the elementary +*> reflector H(i). +*> \endverbatim +*> +*> \param[out] T +*> \verbatim +*> T is COMPLEX array, dimension (LDT,K) +*> The k by k triangular factor T of the block reflector. 
+*> If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is +*> lower triangular. The rest of the array is not used. +*> \endverbatim +*> +*> \param[in] LDT +*> \verbatim +*> LDT is INTEGER +*> The leading dimension of the array T. LDT >= K. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \ingroup larft +* +*> \par Further Details: +* ===================== +*> +*> \verbatim +*> +*> The shape of the matrix V and the storage of the vectors which define +*> the H(i) is best illustrated by the following example with n = 5 and +*> k = 3. The elements equal to 1 are not stored. +*> +*> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': +*> +*> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) +*> ( v1 1 ) ( 1 v2 v2 v2 ) +*> ( v1 v2 1 ) ( 1 v3 v3 ) +*> ( v1 v2 v3 ) +*> ( v1 v2 v3 ) +*> +*> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': +*> +*> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) +*> ( v1 v2 v3 ) ( v2 v2 v2 1 ) +*> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) +*> ( 1 v3 ) +*> ( 1 ) +*> \endverbatim +*> +* ===================================================================== + SUBROUTINE CLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* +* -- LAPACK auxiliary routine -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* +* .. Scalar Arguments .. + CHARACTER DIRECT, STOREV + INTEGER K, LDT, LDV, N +* .. +* .. Array Arguments .. + COMPLEX T( LDT, * ), TAU( * ), V( LDV, * ) +* .. +* +* ===================================================================== +* +* .. Parameters .. + COMPLEX ONE, ZERO + PARAMETER ( ONE = ( 1.0E+0, 0.0E+0 ), + $ ZERO = ( 0.0E+0, 0.0E+0 ) ) +* .. +* .. Local Scalars .. + INTEGER I, J, PREVLASTV, LASTV +* .. +* .. External Subroutines .. + EXTERNAL CGEMM, CGEMV, CTRMV +* .. +* .. External Functions .. 
+ LOGICAL LSAME + EXTERNAL LSAME +* .. +* .. Executable Statements .. +* +* Quick return if possible +* + IF( N.EQ.0 ) + $ RETURN +* + IF( LSAME( DIRECT, 'F' ) ) THEN + PREVLASTV = N + DO I = 1, K + PREVLASTV = MAX( PREVLASTV, I ) + IF( TAU( I ).EQ.ZERO ) THEN +* +* H(i) = I +* + DO J = 1, I + T( J, I ) = ZERO + END DO + ELSE +* +* general case +* + IF( LSAME( STOREV, 'C' ) ) THEN +* Skip any trailing zeros. + DO LASTV = N, I+1, -1 + IF( V( LASTV, I ).NE.ZERO ) EXIT + END DO + DO J = 1, I-1 + T( J, I ) = -TAU( I ) * CONJG( V( I , J ) ) + END DO + J = MIN( LASTV, PREVLASTV ) +* +* T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**H * V(i:j,i) +* + CALL CGEMV( 'Conjugate transpose', J-I, I-1, + $ -TAU( I ), V( I+1, 1 ), LDV, + $ V( I+1, I ), 1, + $ ONE, T( 1, I ), 1 ) + ELSE +* Skip any trailing zeros. + DO LASTV = N, I+1, -1 + IF( V( I, LASTV ).NE.ZERO ) EXIT + END DO + DO J = 1, I-1 + T( J, I ) = -TAU( I ) * V( J , I ) + END DO + J = MIN( LASTV, PREVLASTV ) +* +* T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**H +* + CALL CGEMM( 'N', 'C', I-1, 1, J-I, -TAU( I ), + $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, + $ ONE, T( 1, I ), LDT ) + END IF +* +* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) +* + CALL CTRMV( 'Upper', 'No transpose', 'Non-unit', I-1, + $ T, + $ LDT, T( 1, I ), 1 ) + T( I, I ) = TAU( I ) + IF( I.GT.1 ) THEN + PREVLASTV = MAX( PREVLASTV, LASTV ) + ELSE + PREVLASTV = LASTV + END IF + END IF + END DO + ELSE + PREVLASTV = 1 + DO I = K, 1, -1 + IF( TAU( I ).EQ.ZERO ) THEN +* +* H(i) = I +* + DO J = I, K + T( J, I ) = ZERO + END DO + ELSE +* +* general case +* + IF( I.LT.K ) THEN + IF( LSAME( STOREV, 'C' ) ) THEN +* Skip any leading zeros. 
+ DO LASTV = 1, I-1 + IF( V( LASTV, I ).NE.ZERO ) EXIT + END DO + DO J = I+1, K + T( J, I ) = -TAU( I ) * CONJG( V( N-K+I , J ) ) + END DO + J = MAX( LASTV, PREVLASTV ) +* +* T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**H * V(j:n-k+i,i) +* + CALL CGEMV( 'Conjugate transpose', N-K+I-J, K-I, + $ -TAU( I ), V( J, I+1 ), LDV, V( J, I ), + $ 1, ONE, T( I+1, I ), 1 ) + ELSE +* Skip any leading zeros. + DO LASTV = 1, I-1 + IF( V( I, LASTV ).NE.ZERO ) EXIT + END DO + DO J = I+1, K + T( J, I ) = -TAU( I ) * V( J, N-K+I ) + END DO + J = MAX( LASTV, PREVLASTV ) +* +* T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**H +* + CALL CGEMM( 'N', 'C', K-I, 1, N-K+I-J, + $ -TAU( I ), + $ V( I+1, J ), LDV, V( I, J ), LDV, + $ ONE, T( I+1, I ), LDT ) + END IF +* +* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) +* + CALL CTRMV( 'Lower', 'No transpose', 'Non-unit', + $ K-I, + $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) + IF( I.GT.1 ) THEN + PREVLASTV = MIN( PREVLASTV, LASTV ) + ELSE + PREVLASTV = LASTV + END IF + END IF + T( I, I ) = TAU( I ) + END IF + END DO + END IF + RETURN +* +* End of CLARFT +* + END diff --git a/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/dlarft.f b/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/dlarft.f new file mode 100644 index 0000000000..19b7c7b1b2 --- /dev/null +++ b/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/dlarft.f @@ -0,0 +1,326 @@ +*> \brief \b DLARFT VARIANT: left-looking Level 2 BLAS version of the algorithm +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download DLARFT + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* SUBROUTINE DLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* +* .. Scalar Arguments .. +* CHARACTER DIRECT, STOREV +* INTEGER K, LDT, LDV, N +* .. +* .. Array Arguments .. +* DOUBLE PRECISION T( LDT, * ), TAU( * ), V( LDV, * ) +* .. 
+* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> DLARFT forms the triangular factor T of a real block reflector H +*> of order n, which is defined as a product of k elementary reflectors. +*> +*> If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; +*> +*> If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. +*> +*> If STOREV = 'C', the vector which defines the elementary reflector +*> H(i) is stored in the i-th column of the array V, and +*> +*> H = I - V * T * V**T +*> +*> If STOREV = 'R', the vector which defines the elementary reflector +*> H(i) is stored in the i-th row of the array V, and +*> +*> H = I - V**T * T * V +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] DIRECT +*> \verbatim +*> DIRECT is CHARACTER*1 +*> Specifies the order in which the elementary reflectors are +*> multiplied to form the block reflector: +*> = 'F': H = H(1) H(2) . . . H(k) (Forward) +*> = 'B': H = H(k) . . . H(2) H(1) (Backward) +*> \endverbatim +*> +*> \param[in] STOREV +*> \verbatim +*> STOREV is CHARACTER*1 +*> Specifies how the vectors which define the elementary +*> reflectors are stored (see also Further Details): +*> = 'C': columnwise +*> = 'R': rowwise +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The order of the block reflector H. N >= 0. +*> \endverbatim +*> +*> \param[in] K +*> \verbatim +*> K is INTEGER +*> The order of the triangular factor T (= the number of +*> elementary reflectors). K >= 1. +*> \endverbatim +*> +*> \param[in] V +*> \verbatim +*> V is DOUBLE PRECISION array, dimension +*> (LDV,K) if STOREV = 'C' +*> (LDV,N) if STOREV = 'R' +*> The matrix V. See further details. +*> \endverbatim +*> +*> \param[in] LDV +*> \verbatim +*> LDV is INTEGER +*> The leading dimension of the array V. +*> If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. 
+*> \endverbatim +*> +*> \param[in] TAU +*> \verbatim +*> TAU is DOUBLE PRECISION array, dimension (K) +*> TAU(i) must contain the scalar factor of the elementary +*> reflector H(i). +*> \endverbatim +*> +*> \param[out] T +*> \verbatim +*> T is DOUBLE PRECISION array, dimension (LDT,K) +*> The k by k triangular factor T of the block reflector. +*> If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is +*> lower triangular. The rest of the array is not used. +*> \endverbatim +*> +*> \param[in] LDT +*> \verbatim +*> LDT is INTEGER +*> The leading dimension of the array T. LDT >= K. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \ingroup larft +* +*> \par Further Details: +* ===================== +*> +*> \verbatim +*> +*> The shape of the matrix V and the storage of the vectors which define +*> the H(i) is best illustrated by the following example with n = 5 and +*> k = 3. The elements equal to 1 are not stored. +*> +*> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': +*> +*> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) +*> ( v1 1 ) ( 1 v2 v2 v2 ) +*> ( v1 v2 1 ) ( 1 v3 v3 ) +*> ( v1 v2 v3 ) +*> ( v1 v2 v3 ) +*> +*> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': +*> +*> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) +*> ( v1 v2 v3 ) ( v2 v2 v2 1 ) +*> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) +*> ( 1 v3 ) +*> ( 1 ) +*> \endverbatim +*> +* ===================================================================== + SUBROUTINE DLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* +* -- LAPACK auxiliary routine -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* +* .. Scalar Arguments .. + CHARACTER DIRECT, STOREV + INTEGER K, LDT, LDV, N +* .. +* .. Array Arguments .. + DOUBLE PRECISION T( LDT, * ), TAU( * ), V( LDV, * ) +* .. 
+* +* ===================================================================== +* +* .. Parameters .. + DOUBLE PRECISION ONE, ZERO + PARAMETER ( ONE = 1.0D+0, ZERO = 0.0D+0 ) +* .. +* .. Local Scalars .. + INTEGER I, J, PREVLASTV, LASTV +* .. +* .. External Subroutines .. + EXTERNAL DGEMV, DTRMV +* .. +* .. External Functions .. + LOGICAL LSAME + EXTERNAL LSAME +* .. +* .. Executable Statements .. +* +* Quick return if possible +* + IF( N.EQ.0 ) + $ RETURN +* + IF( LSAME( DIRECT, 'F' ) ) THEN + PREVLASTV = N + DO I = 1, K + PREVLASTV = MAX( I, PREVLASTV ) + IF( TAU( I ).EQ.ZERO ) THEN +* +* H(i) = I +* + DO J = 1, I + T( J, I ) = ZERO + END DO + ELSE +* +* general case +* + IF( LSAME( STOREV, 'C' ) ) THEN +* Skip any trailing zeros. + DO LASTV = N, I+1, -1 + IF( V( LASTV, I ).NE.ZERO ) EXIT + END DO + DO J = 1, I-1 + T( J, I ) = -TAU( I ) * V( I , J ) + END DO + J = MIN( LASTV, PREVLASTV ) +* +* T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**T * V(i:j,i) +* + CALL DGEMV( 'Transpose', J-I, I-1, -TAU( I ), + $ V( I+1, 1 ), LDV, V( I+1, I ), 1, ONE, + $ T( 1, I ), 1 ) + ELSE +* Skip any trailing zeros. + DO LASTV = N, I+1, -1 + IF( V( I, LASTV ).NE.ZERO ) EXIT + END DO + DO J = 1, I-1 + T( J, I ) = -TAU( I ) * V( J , I ) + END DO + J = MIN( LASTV, PREVLASTV ) +* +* T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**T +* + CALL DGEMV( 'No transpose', I-1, J-I, -TAU( I ), + $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, ONE, + $ T( 1, I ), 1 ) + END IF +* +* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) +* + CALL DTRMV( 'Upper', 'No transpose', 'Non-unit', I-1, + $ T, + $ LDT, T( 1, I ), 1 ) + T( I, I ) = TAU( I ) + IF( I.GT.1 ) THEN + PREVLASTV = MAX( PREVLASTV, LASTV ) + ELSE + PREVLASTV = LASTV + END IF + END IF + END DO + ELSE + PREVLASTV = 1 + DO I = K, 1, -1 + IF( TAU( I ).EQ.ZERO ) THEN +* +* H(i) = I +* + DO J = I, K + T( J, I ) = ZERO + END DO + ELSE +* +* general case +* + IF( I.LT.K ) THEN + IF( LSAME( STOREV, 'C' ) ) THEN +* Skip any leading zeros. 
+ DO LASTV = 1, I-1 + IF( V( LASTV, I ).NE.ZERO ) EXIT + END DO + DO J = I+1, K + T( J, I ) = -TAU( I ) * V( N-K+I , J ) + END DO + J = MAX( LASTV, PREVLASTV ) +* +* T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**T * V(j:n-k+i,i) +* + CALL DGEMV( 'Transpose', N-K+I-J, K-I, + $ -TAU( I ), + $ V( J, I+1 ), LDV, V( J, I ), 1, ONE, + $ T( I+1, I ), 1 ) + ELSE +* Skip any leading zeros. + DO LASTV = 1, I-1 + IF( V( I, LASTV ).NE.ZERO ) EXIT + END DO + DO J = I+1, K + T( J, I ) = -TAU( I ) * V( J, N-K+I ) + END DO + J = MAX( LASTV, PREVLASTV ) +* +* T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**T +* + CALL DGEMV( 'No transpose', K-I, N-K+I-J, + $ -TAU( I ), V( I+1, J ), LDV, V( I, J ), LDV, + $ ONE, T( I+1, I ), 1 ) + END IF +* +* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) +* + CALL DTRMV( 'Lower', 'No transpose', 'Non-unit', + $ K-I, + $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) + IF( I.GT.1 ) THEN + PREVLASTV = MIN( PREVLASTV, LASTV ) + ELSE + PREVLASTV = LASTV + END IF + END IF + T( I, I ) = TAU( I ) + END IF + END DO + END IF + RETURN +* +* End of DLARFT +* + END diff --git a/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/slarft.f b/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/slarft.f new file mode 100644 index 0000000000..e1578e2587 --- /dev/null +++ b/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/slarft.f @@ -0,0 +1,326 @@ +*> \brief \b SLARFT VARIANT: left-looking Level 2 BLAS version of the algorithm. +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download SLARFT + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* SUBROUTINE SLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* +* .. Scalar Arguments .. +* CHARACTER DIRECT, STOREV +* INTEGER K, LDT, LDV, N +* .. +* .. Array Arguments .. +* REAL T( LDT, * ), TAU( * ), V( LDV, * ) +* .. 
+* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> SLARFT forms the triangular factor T of a real block reflector H +*> of order n, which is defined as a product of k elementary reflectors. +*> +*> If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; +*> +*> If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. +*> +*> If STOREV = 'C', the vector which defines the elementary reflector +*> H(i) is stored in the i-th column of the array V, and +*> +*> H = I - V * T * V**T +*> +*> If STOREV = 'R', the vector which defines the elementary reflector +*> H(i) is stored in the i-th row of the array V, and +*> +*> H = I - V**T * T * V +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] DIRECT +*> \verbatim +*> DIRECT is CHARACTER*1 +*> Specifies the order in which the elementary reflectors are +*> multiplied to form the block reflector: +*> = 'F': H = H(1) H(2) . . . H(k) (Forward) +*> = 'B': H = H(k) . . . H(2) H(1) (Backward) +*> \endverbatim +*> +*> \param[in] STOREV +*> \verbatim +*> STOREV is CHARACTER*1 +*> Specifies how the vectors which define the elementary +*> reflectors are stored (see also Further Details): +*> = 'C': columnwise +*> = 'R': rowwise +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The order of the block reflector H. N >= 0. +*> \endverbatim +*> +*> \param[in] K +*> \verbatim +*> K is INTEGER +*> The order of the triangular factor T (= the number of +*> elementary reflectors). K >= 1. +*> \endverbatim +*> +*> \param[in] V +*> \verbatim +*> V is REAL array, dimension +*> (LDV,K) if STOREV = 'C' +*> (LDV,N) if STOREV = 'R' +*> The matrix V. See further details. +*> \endverbatim +*> +*> \param[in] LDV +*> \verbatim +*> LDV is INTEGER +*> The leading dimension of the array V. +*> If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. 
+*> \endverbatim +*> +*> \param[in] TAU +*> \verbatim +*> TAU is REAL array, dimension (K) +*> TAU(i) must contain the scalar factor of the elementary +*> reflector H(i). +*> \endverbatim +*> +*> \param[out] T +*> \verbatim +*> T is REAL array, dimension (LDT,K) +*> The k by k triangular factor T of the block reflector. +*> If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is +*> lower triangular. The rest of the array is not used. +*> \endverbatim +*> +*> \param[in] LDT +*> \verbatim +*> LDT is INTEGER +*> The leading dimension of the array T. LDT >= K. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \ingroup larft +* +*> \par Further Details: +* ===================== +*> +*> \verbatim +*> +*> The shape of the matrix V and the storage of the vectors which define +*> the H(i) is best illustrated by the following example with n = 5 and +*> k = 3. The elements equal to 1 are not stored. +*> +*> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': +*> +*> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) +*> ( v1 1 ) ( 1 v2 v2 v2 ) +*> ( v1 v2 1 ) ( 1 v3 v3 ) +*> ( v1 v2 v3 ) +*> ( v1 v2 v3 ) +*> +*> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': +*> +*> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) +*> ( v1 v2 v3 ) ( v2 v2 v2 1 ) +*> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) +*> ( 1 v3 ) +*> ( 1 ) +*> \endverbatim +*> +* ===================================================================== + SUBROUTINE SLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* +* -- LAPACK auxiliary routine -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* +* .. Scalar Arguments .. + CHARACTER DIRECT, STOREV + INTEGER K, LDT, LDV, N +* .. +* .. Array Arguments .. + REAL T( LDT, * ), TAU( * ), V( LDV, * ) +* .. 
+* +* ===================================================================== +* +* .. Parameters .. + REAL ONE, ZERO + PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 ) +* .. +* .. Local Scalars .. + INTEGER I, J, PREVLASTV, LASTV +* .. +* .. External Subroutines .. + EXTERNAL SGEMV, STRMV +* .. +* .. External Functions .. + LOGICAL LSAME + EXTERNAL LSAME +* .. +* .. Executable Statements .. +* +* Quick return if possible +* + IF( N.EQ.0 ) + $ RETURN +* + IF( LSAME( DIRECT, 'F' ) ) THEN + PREVLASTV = N + DO I = 1, K + PREVLASTV = MAX( I, PREVLASTV ) + IF( TAU( I ).EQ.ZERO ) THEN +* +* H(i) = I +* + DO J = 1, I + T( J, I ) = ZERO + END DO + ELSE +* +* general case +* + IF( LSAME( STOREV, 'C' ) ) THEN +* Skip any trailing zeros. + DO LASTV = N, I+1, -1 + IF( V( LASTV, I ).NE.ZERO ) EXIT + END DO + DO J = 1, I-1 + T( J, I ) = -TAU( I ) * V( I , J ) + END DO + J = MIN( LASTV, PREVLASTV ) +* +* T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**T * V(i:j,i) +* + CALL SGEMV( 'Transpose', J-I, I-1, -TAU( I ), + $ V( I+1, 1 ), LDV, V( I+1, I ), 1, ONE, + $ T( 1, I ), 1 ) + ELSE +* Skip any trailing zeros. + DO LASTV = N, I+1, -1 + IF( V( I, LASTV ).NE.ZERO ) EXIT + END DO + DO J = 1, I-1 + T( J, I ) = -TAU( I ) * V( J , I ) + END DO + J = MIN( LASTV, PREVLASTV ) +* +* T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**T +* + CALL SGEMV( 'No transpose', I-1, J-I, -TAU( I ), + $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, + $ ONE, T( 1, I ), 1 ) + END IF +* +* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) +* + CALL STRMV( 'Upper', 'No transpose', 'Non-unit', I-1, + $ T, + $ LDT, T( 1, I ), 1 ) + T( I, I ) = TAU( I ) + IF( I.GT.1 ) THEN + PREVLASTV = MAX( PREVLASTV, LASTV ) + ELSE + PREVLASTV = LASTV + END IF + END IF + END DO + ELSE + PREVLASTV = 1 + DO I = K, 1, -1 + IF( TAU( I ).EQ.ZERO ) THEN +* +* H(i) = I +* + DO J = I, K + T( J, I ) = ZERO + END DO + ELSE +* +* general case +* + IF( I.LT.K ) THEN + IF( LSAME( STOREV, 'C' ) ) THEN +* Skip any leading zeros. 
+ DO LASTV = 1, I-1 + IF( V( LASTV, I ).NE.ZERO ) EXIT + END DO + DO J = I+1, K + T( J, I ) = -TAU( I ) * V( N-K+I , J ) + END DO + J = MAX( LASTV, PREVLASTV ) +* +* T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**T * V(j:n-k+i,i) +* + CALL SGEMV( 'Transpose', N-K+I-J, K-I, + $ -TAU( I ), + $ V( J, I+1 ), LDV, V( J, I ), 1, ONE, + $ T( I+1, I ), 1 ) + ELSE +* Skip any leading zeros. + DO LASTV = 1, I-1 + IF( V( I, LASTV ).NE.ZERO ) EXIT + END DO + DO J = I+1, K + T( J, I ) = -TAU( I ) * V( J, N-K+I ) + END DO + J = MAX( LASTV, PREVLASTV ) +* +* T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**T +* + CALL SGEMV( 'No transpose', K-I, N-K+I-J, + $ -TAU( I ), V( I+1, J ), LDV, V( I, J ), LDV, + $ ONE, T( I+1, I ), 1 ) + END IF +* +* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) +* + CALL STRMV( 'Lower', 'No transpose', 'Non-unit', + $ K-I, + $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) + IF( I.GT.1 ) THEN + PREVLASTV = MIN( PREVLASTV, LASTV ) + ELSE + PREVLASTV = LASTV + END IF + END IF + T( I, I ) = TAU( I ) + END IF + END DO + END IF + RETURN +* +* End of SLARFT +* + END diff --git a/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/zlarft.f b/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/zlarft.f new file mode 100644 index 0000000000..6abadd501e --- /dev/null +++ b/lapack-netlib/SRC/VARIANTS/larft/LL_LVL2/zlarft.f @@ -0,0 +1,327 @@ +*> \brief \b ZLARFT VARIANT: left-looking Level 2 BLAS version of the algorithm. +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +*> \htmlonly +*> Download ZLARFT + dependencies +*> +*> [TGZ] +*> +*> [ZIP] +*> +*> [TXT] +*> \endhtmlonly +* +* Definition: +* =========== +* +* SUBROUTINE ZLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* +* .. Scalar Arguments .. +* CHARACTER DIRECT, STOREV +* INTEGER K, LDT, LDV, N +* .. +* .. Array Arguments .. +* COMPLEX*16 T( LDT, * ), TAU( * ), V( LDV, * ) +* .. 
+* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> ZLARFT forms the triangular factor T of a complex block reflector H +*> of order n, which is defined as a product of k elementary reflectors. +*> +*> If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular; +*> +*> If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular. +*> +*> If STOREV = 'C', the vector which defines the elementary reflector +*> H(i) is stored in the i-th column of the array V, and +*> +*> H = I - V * T * V**H +*> +*> If STOREV = 'R', the vector which defines the elementary reflector +*> H(i) is stored in the i-th row of the array V, and +*> +*> H = I - V**H * T * V +*> \endverbatim +* +* Arguments: +* ========== +* +*> \param[in] DIRECT +*> \verbatim +*> DIRECT is CHARACTER*1 +*> Specifies the order in which the elementary reflectors are +*> multiplied to form the block reflector: +*> = 'F': H = H(1) H(2) . . . H(k) (Forward) +*> = 'B': H = H(k) . . . H(2) H(1) (Backward) +*> \endverbatim +*> +*> \param[in] STOREV +*> \verbatim +*> STOREV is CHARACTER*1 +*> Specifies how the vectors which define the elementary +*> reflectors are stored (see also Further Details): +*> = 'C': columnwise +*> = 'R': rowwise +*> \endverbatim +*> +*> \param[in] N +*> \verbatim +*> N is INTEGER +*> The order of the block reflector H. N >= 0. +*> \endverbatim +*> +*> \param[in] K +*> \verbatim +*> K is INTEGER +*> The order of the triangular factor T (= the number of +*> elementary reflectors). K >= 1. +*> \endverbatim +*> +*> \param[in] V +*> \verbatim +*> V is COMPLEX*16 array, dimension +*> (LDV,K) if STOREV = 'C' +*> (LDV,N) if STOREV = 'R' +*> The matrix V. See further details. +*> \endverbatim +*> +*> \param[in] LDV +*> \verbatim +*> LDV is INTEGER +*> The leading dimension of the array V. +*> If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K. 
+*> \endverbatim +*> +*> \param[in] TAU +*> \verbatim +*> TAU is COMPLEX*16 array, dimension (K) +*> TAU(i) must contain the scalar factor of the elementary +*> reflector H(i). +*> \endverbatim +*> +*> \param[out] T +*> \verbatim +*> T is COMPLEX*16 array, dimension (LDT,K) +*> The k by k triangular factor T of the block reflector. +*> If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is +*> lower triangular. The rest of the array is not used. +*> \endverbatim +*> +*> \param[in] LDT +*> \verbatim +*> LDT is INTEGER +*> The leading dimension of the array T. LDT >= K. +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \ingroup larft +* +*> \par Further Details: +* ===================== +*> +*> \verbatim +*> +*> The shape of the matrix V and the storage of the vectors which define +*> the H(i) is best illustrated by the following example with n = 5 and +*> k = 3. The elements equal to 1 are not stored. +*> +*> DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R': +*> +*> V = ( 1 ) V = ( 1 v1 v1 v1 v1 ) +*> ( v1 1 ) ( 1 v2 v2 v2 ) +*> ( v1 v2 1 ) ( 1 v3 v3 ) +*> ( v1 v2 v3 ) +*> ( v1 v2 v3 ) +*> +*> DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R': +*> +*> V = ( v1 v2 v3 ) V = ( v1 v1 1 ) +*> ( v1 v2 v3 ) ( v2 v2 v2 1 ) +*> ( 1 v2 v3 ) ( v3 v3 v3 v3 1 ) +*> ( 1 v3 ) +*> ( 1 ) +*> \endverbatim +*> +* ===================================================================== + SUBROUTINE ZLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* +* -- LAPACK auxiliary routine -- +* -- LAPACK is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* +* .. Scalar Arguments .. + CHARACTER DIRECT, STOREV + INTEGER K, LDT, LDV, N +* .. +* .. Array Arguments .. + COMPLEX*16 T( LDT, * ), TAU( * ), V( LDV, * ) +* .. 
+* +* ===================================================================== +* +* .. Parameters .. + COMPLEX*16 ONE, ZERO + PARAMETER ( ONE = ( 1.0D+0, 0.0D+0 ), + $ ZERO = ( 0.0D+0, 0.0D+0 ) ) +* .. +* .. Local Scalars .. + INTEGER I, J, PREVLASTV, LASTV +* .. +* .. External Subroutines .. + EXTERNAL ZGEMV, ZTRMV, ZGEMM +* .. +* .. External Functions .. + LOGICAL LSAME + EXTERNAL LSAME +* .. +* .. Executable Statements .. +* +* Quick return if possible +* + IF( N.EQ.0 ) + $ RETURN +* + IF( LSAME( DIRECT, 'F' ) ) THEN + PREVLASTV = N + DO I = 1, K + PREVLASTV = MAX( PREVLASTV, I ) + IF( TAU( I ).EQ.ZERO ) THEN +* +* H(i) = I +* + DO J = 1, I + T( J, I ) = ZERO + END DO + ELSE +* +* general case +* + IF( LSAME( STOREV, 'C' ) ) THEN +* Skip any trailing zeros. + DO LASTV = N, I+1, -1 + IF( V( LASTV, I ).NE.ZERO ) EXIT + END DO + DO J = 1, I-1 + T( J, I ) = -TAU( I ) * CONJG( V( I , J ) ) + END DO + J = MIN( LASTV, PREVLASTV ) +* +* T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**H * V(i:j,i) +* + CALL ZGEMV( 'Conjugate transpose', J-I, I-1, + $ -TAU( I ), V( I+1, 1 ), LDV, + $ V( I+1, I ), 1, ONE, T( 1, I ), 1 ) + ELSE +* Skip any trailing zeros. 
+ DO LASTV = N, I+1, -1 + IF( V( I, LASTV ).NE.ZERO ) EXIT + END DO + DO J = 1, I-1 + T( J, I ) = -TAU( I ) * V( J , I ) + END DO + J = MIN( LASTV, PREVLASTV ) +* +* T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**H +* + CALL ZGEMM( 'N', 'C', I-1, 1, J-I, -TAU( I ), + $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, + $ ONE, T( 1, I ), LDT ) + END IF +* +* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) +* + CALL ZTRMV( 'Upper', 'No transpose', 'Non-unit', I-1, + $ T, + $ LDT, T( 1, I ), 1 ) + T( I, I ) = TAU( I ) + IF( I.GT.1 ) THEN + PREVLASTV = MAX( PREVLASTV, LASTV ) + ELSE + PREVLASTV = LASTV + END IF + END IF + END DO + ELSE + PREVLASTV = 1 + DO I = K, 1, -1 + IF( TAU( I ).EQ.ZERO ) THEN +* +* H(i) = I +* + DO J = I, K + T( J, I ) = ZERO + END DO + ELSE +* +* general case +* + IF( I.LT.K ) THEN + IF( LSAME( STOREV, 'C' ) ) THEN +* Skip any leading zeros. + DO LASTV = 1, I-1 + IF( V( LASTV, I ).NE.ZERO ) EXIT + END DO + DO J = I+1, K + T( J, I ) = -TAU( I ) * CONJG( V( N-K+I , J ) ) + END DO + J = MAX( LASTV, PREVLASTV ) +* +* T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**H * V(j:n-k+i,i) +* + CALL ZGEMV( 'Conjugate transpose', N-K+I-J, K-I, + $ -TAU( I ), V( J, I+1 ), LDV, V( J, I ), + $ 1, ONE, T( I+1, I ), 1 ) + ELSE +* Skip any leading zeros. 
+ DO LASTV = 1, I-1 + IF( V( I, LASTV ).NE.ZERO ) EXIT + END DO + DO J = I+1, K + T( J, I ) = -TAU( I ) * V( J, N-K+I ) + END DO + J = MAX( LASTV, PREVLASTV ) +* +* T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**H +* + CALL ZGEMM( 'N', 'C', K-I, 1, N-K+I-J, + $ -TAU( I ), + $ V( I+1, J ), LDV, V( I, J ), LDV, + $ ONE, T( I+1, I ), LDT ) + END IF +* +* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) +* + CALL ZTRMV( 'Lower', 'No transpose', 'Non-unit', + $ K-I, + $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) + IF( I.GT.1 ) THEN + PREVLASTV = MIN( PREVLASTV, LASTV ) + ELSE + PREVLASTV = LASTV + END IF + END IF + T( I, I ) = TAU( I ) + END IF + END DO + END IF + RETURN +* +* End of ZLARFT +* + END diff --git a/lapack-netlib/SRC/claqp2rk.f b/lapack-netlib/SRC/claqp2rk.f index 6b1db085aa..0501c50bb4 100644 --- a/lapack-netlib/SRC/claqp2rk.f +++ b/lapack-netlib/SRC/claqp2rk.f @@ -378,7 +378,7 @@ SUBROUTINE CLAQP2RK( M, N, NRHS, IOFFSET, KMAX, ABSTOL, RELTOL, EXTERNAL CLARF, CLARFG, CSWAP * .. * .. Intrinsic Functions .. - INTRINSIC ABS, REAL, CONJG, IMAG, MAX, MIN, SQRT + INTRINSIC ABS, REAL, CONJG, AIMAG, MAX, MIN, SQRT * .. * .. External Functions .. LOGICAL SISNAN @@ -599,8 +599,8 @@ SUBROUTINE CLAQP2RK( M, N, NRHS, IOFFSET, KMAX, ABSTOL, RELTOL, * IF( SISNAN( REAL( TAU(KK) ) ) ) THEN TAUNAN = REAL( TAU(KK) ) - ELSE IF( SISNAN( IMAG( TAU(KK) ) ) ) THEN - TAUNAN = IMAG( TAU(KK) ) + ELSE IF( SISNAN( AIMAG( TAU(KK) ) ) ) THEN + TAUNAN = AIMAG( TAU(KK) ) ELSE TAUNAN = ZERO END IF diff --git a/lapack-netlib/SRC/claqp3rk.f b/lapack-netlib/SRC/claqp3rk.f index 3703bcbd65..8fe5a220ff 100644 --- a/lapack-netlib/SRC/claqp3rk.f +++ b/lapack-netlib/SRC/claqp3rk.f @@ -431,7 +431,7 @@ SUBROUTINE CLAQP3RK( M, N, NRHS, IOFFSET, NB, ABSTOL, EXTERNAL CGEMM, CGEMV, CLARFG, CSWAP * .. * .. Intrinsic Functions .. - INTRINSIC ABS, REAL, CONJG, IMAG, MAX, MIN, SQRT + INTRINSIC ABS, REAL, CONJG, AIMAG, MAX, MIN, SQRT * .. * .. External Functions .. 
LOGICAL SISNAN @@ -739,8 +739,8 @@ SUBROUTINE CLAQP3RK( M, N, NRHS, IOFFSET, NB, ABSTOL, * IF( SISNAN( REAL( TAU(K) ) ) ) THEN TAUNAN = REAL( TAU(K) ) - ELSE IF( SISNAN( IMAG( TAU(K) ) ) ) THEN - TAUNAN = IMAG( TAU(K) ) + ELSE IF( SISNAN( AIMAG( TAU(K) ) ) ) THEN + TAUNAN = AIMAG( TAU(K) ) ELSE TAUNAN = ZERO END IF diff --git a/lapack-netlib/SRC/clarft.f b/lapack-netlib/SRC/clarft.f index fdf80b78e9..de8b97bf9c 100644 --- a/lapack-netlib/SRC/clarft.f +++ b/lapack-netlib/SRC/clarft.f @@ -18,7 +18,7 @@ * Definition: * =========== * -* SUBROUTINE CLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* RECURSIVE SUBROUTINE CLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) * * .. Scalar Arguments .. * CHARACTER DIRECT, STOREV @@ -130,7 +130,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \ingroup complexOTHERauxiliary +*> \ingroup larft * *> \par Further Details: * ===================== @@ -159,167 +159,473 @@ *> \endverbatim *> * ===================================================================== - SUBROUTINE CLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) + RECURSIVE SUBROUTINE CLARFT( DIRECT, STOREV, N, K, V, LDV, + $ TAU, T, LDT ) * * -- LAPACK auxiliary routine -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * -* .. Scalar Arguments .. - CHARACTER DIRECT, STOREV - INTEGER K, LDT, LDV, N +* .. Scalar Arguments +* + CHARACTER DIRECT, STOREV + INTEGER K, LDT, LDV, N * .. * .. Array Arguments .. - COMPLEX T( LDT, * ), TAU( * ), V( LDV, * ) -* .. * -* ===================================================================== + COMPLEX T( LDT, * ), TAU( * ), V( LDV, * ) +* .. * * .. Parameters .. - COMPLEX ONE, ZERO - PARAMETER ( ONE = ( 1.0E+0, 0.0E+0 ), - $ ZERO = ( 0.0E+0, 0.0E+0 ) ) -* .. +* + COMPLEX ONE, NEG_ONE, ZERO + PARAMETER(ONE=1.0E+0, ZERO = 0.0E+0, NEG_ONE=-1.0E+0) +* * .. Local Scalars .. - INTEGER I, J, PREVLASTV, LASTV -* .. 
+* + INTEGER I,J,L + LOGICAL QR,LQ,QL,DIRF,COLV +* * .. External Subroutines .. - EXTERNAL CGEMM, CGEMV, CTRMV -* .. -* .. External Functions .. - LOGICAL LSAME - EXTERNAL LSAME +* + EXTERNAL CTRMM,CGEMM,CLACPY +* +* .. External Functions.. +* + LOGICAL LSAME + EXTERNAL LSAME +* +* .. Intrinsic Functions.. +* + INTRINSIC CONJG +* +* The general scheme used is inspired by the approach inside DGEQRT3 +* which was (at the time of writing this code): +* Based on the algorithm of Elmroth and Gustavson, +* IBM J. Res. Develop. Vol 44 No. 4 July 2000. * .. * .. Executable Statements .. * * Quick return if possible * - IF( N.EQ.0 ) - $ RETURN -* - IF( LSAME( DIRECT, 'F' ) ) THEN - PREVLASTV = N - DO I = 1, K - PREVLASTV = MAX( PREVLASTV, I ) - IF( TAU( I ).EQ.ZERO ) THEN -* -* H(i) = I -* - DO J = 1, I - T( J, I ) = ZERO - END DO - ELSE -* -* general case -* - IF( LSAME( STOREV, 'C' ) ) THEN -* Skip any trailing zeros. - DO LASTV = N, I+1, -1 - IF( V( LASTV, I ).NE.ZERO ) EXIT - END DO - DO J = 1, I-1 - T( J, I ) = -TAU( I ) * CONJG( V( I , J ) ) - END DO - J = MIN( LASTV, PREVLASTV ) -* -* T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**H * V(i:j,i) -* - CALL CGEMV( 'Conjugate transpose', J-I, I-1, - $ -TAU( I ), V( I+1, 1 ), LDV, - $ V( I+1, I ), 1, - $ ONE, T( 1, I ), 1 ) - ELSE -* Skip any trailing zeros. 
- DO LASTV = N, I+1, -1 - IF( V( I, LASTV ).NE.ZERO ) EXIT - END DO - DO J = 1, I-1 - T( J, I ) = -TAU( I ) * V( J , I ) - END DO - J = MIN( LASTV, PREVLASTV ) -* -* T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**H -* - CALL CGEMM( 'N', 'C', I-1, 1, J-I, -TAU( I ), - $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, - $ ONE, T( 1, I ), LDT ) - END IF -* -* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) -* - CALL CTRMV( 'Upper', 'No transpose', 'Non-unit', I-1, T, - $ LDT, T( 1, I ), 1 ) - T( I, I ) = TAU( I ) - IF( I.GT.1 ) THEN - PREVLASTV = MAX( PREVLASTV, LASTV ) - ELSE - PREVLASTV = LASTV - END IF - END IF + IF(N.EQ.0.OR.K.EQ.0) THEN + RETURN + END IF +* +* Base case +* + IF(N.EQ.1.OR.K.EQ.1) THEN + T(1,1) = TAU(1) + RETURN + END IF +* +* Beginning of executable statements +* + L = K / 2 +* +* Determine what kind of Q we need to compute +* We assume that if the user doesn't provide 'F' for DIRECT, +* then they meant to provide 'B' and if they don't provide +* 'C' for STOREV, then they meant to provide 'R' +* + DIRF = LSAME(DIRECT,'F') + COLV = LSAME(STOREV,'C') +* +* QR happens when we have forward direction in column storage +* + QR = DIRF.AND.COLV +* +* LQ happens when we have forward direction in row storage +* + LQ = DIRF.AND.(.NOT.COLV) +* +* QL happens when we have backward direction in column storage +* + QL = (.NOT.DIRF).AND.COLV +* +* The last case is RQ. 
Due to how we structured this, if the +* above 3 are false, then RQ must be true, so we never store +* this +* RQ happens when we have backward direction in row storage +* RQ = (.NOT.DIRF).AND.(.NOT.COLV) +* + IF(QR) THEN +* +* Break V apart into 6 components +* +* V = |---------------| +* |V_{1,1} 0 | +* |V_{2,1} V_{2,2}| +* |V_{3,1} V_{3,2}| +* |---------------| +* +* V_{1,1}\in\C^{l,l} unit lower triangular +* V_{2,1}\in\C^{k-l,l} rectangular +* V_{3,1}\in\C^{n-k,l} rectangular +* +* V_{2,2}\in\C^{k-l,k-l} unit lower triangular +* V_{3,2}\in\C^{n-k,k-l} rectangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} T_{1,2}| +* |0 T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\C^{l, l} upper triangular +* T_{2,2}\in\C^{k-l, k-l} upper triangular +* T_{1,2}\in\C^{l, k-l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_1*T_{1,1}*V_1')*(I - V_2*T_{2,2}*V_2') +* = I - V_1*T_{1,1}*V_1' - V_2*T_{2,2}*V_2' + V_1*T_{1,1}*V_1'*V_2*T_{2,2}*V_2' +* +* Define T{1,2} = -T_{1,1}*V_1'*V_2*T_{2,2} +* +* Then, we can define the matrix V as +* V = |-------| +* |V_1 V_2| +* |-------| +* +* So, our product is equivalent to the matrix product +* I - V*T*V' +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{1,2} +* +* Compute T_{1,1} recursively +* + CALL CLARFT(DIRECT, STOREV, N, L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL CLARFT(DIRECT, STOREV, N-L, K-L, V(L+1, L+1), LDV, + $ TAU(L+1), T(L+1, L+1), LDT) +* +* Compute T_{1,2} +* T_{1,2} = V_{2,1}' +* + DO J = 1, L + DO I = 1, K-L + T(J, L+I) = CONJG(V(L+I, J)) + END DO END DO - ELSE - PREVLASTV = 1 - DO I = K, 1, -1 - IF( TAU( I ).EQ.ZERO ) THEN -* -* H(i) = I -* - DO J = I, K - T( J, I ) = ZERO - END DO - 
ELSE -* -* general case -* - IF( I.LT.K ) THEN - IF( LSAME( STOREV, 'C' ) ) THEN -* Skip any leading zeros. - DO LASTV = 1, I-1 - IF( V( LASTV, I ).NE.ZERO ) EXIT - END DO - DO J = I+1, K - T( J, I ) = -TAU( I ) * CONJG( V( N-K+I , J ) ) - END DO - J = MAX( LASTV, PREVLASTV ) -* -* T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**H * V(j:n-k+i,i) -* - CALL CGEMV( 'Conjugate transpose', N-K+I-J, K-I, - $ -TAU( I ), V( J, I+1 ), LDV, V( J, I ), - $ 1, ONE, T( I+1, I ), 1 ) - ELSE -* Skip any leading zeros. - DO LASTV = 1, I-1 - IF( V( I, LASTV ).NE.ZERO ) EXIT - END DO - DO J = I+1, K - T( J, I ) = -TAU( I ) * V( J, N-K+I ) - END DO - J = MAX( LASTV, PREVLASTV ) -* -* T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**H -* - CALL CGEMM( 'N', 'C', K-I, 1, N-K+I-J, -TAU( I ), - $ V( I+1, J ), LDV, V( I, J ), LDV, - $ ONE, T( I+1, I ), LDT ) - END IF -* -* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) -* - CALL CTRMV( 'Lower', 'No transpose', 'Non-unit', K-I, - $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) - IF( I.GT.1 ) THEN - PREVLASTV = MIN( PREVLASTV, LASTV ) - ELSE - PREVLASTV = LASTV - END IF - END IF - T( I, I ) = TAU( I ) - END IF +* +* T_{1,2} = T_{1,2}*V_{2,2} +* + CALL CTRMM('Right', 'Lower', 'No transpose', 'Unit', L, + $ K-L, ONE, V(L+1, L+1), LDV, T(1, L+1), LDT) + +* +* T_{1,2} = V_{3,1}'*V_{3,2} + T_{1,2} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL CGEMM('Conjugate', 'No transpose', L, K-L, N-K, ONE, + $ V(K+1, 1), LDV, V(K+1, L+1), LDV, ONE, T(1, L+1), + $ LDT) +* +* At this point, we have that T_{1,2} = V_1'*V_2 +* All that is left is to pre and post multiply by -T_{1,1} and T_{2,2} +* respectively. 
+* +* T_{1,2} = -T_{1,1}*T_{1,2} +* + CALL CTRMM('Left', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T, LDT, T(1, L+1), LDT) +* +* T_{1,2} = T_{1,2}*T_{2,2} +* + CALL CTRMM('Right', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T(L+1, L+1), LDT, T(1, L+1), LDT) + + ELSE IF(LQ) THEN +* +* Break V apart into 6 components +* +* V = |----------------------| +* |V_{1,1} V_{1,2} V{1,3}| +* |0 V_{2,2} V{2,3}| +* |----------------------| +* +* V_{1,1}\in\C^{l,l} unit upper triangular +* V_{1,2}\in\C^{l,k-l} rectangular +* V_{1,3}\in\C^{l,n-k} rectangular +* +* V_{2,2}\in\C^{k-l,k-l} unit upper triangular +* V_{2,3}\in\C^{k-l,n-k} rectangular +* +* Where l = floor(k/2) +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} T_{1,2}| +* |0 T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\C^{l, l} upper triangular +* T_{2,2}\in\C^{k-l, k-l} upper triangular +* T_{1,2}\in\C^{l, k-l} rectangular +* +* Then, consider the product: +* +* (I - V_1'*T_{1,1}*V_1)*(I - V_2'*T_{2,2}*V_2) +* = I - V_1'*T_{1,1}*V_1 - V_2'*T_{2,2}*V_2 + V_1'*T_{1,1}*V_1*V_2'*T_{2,2}*V_2 +* +* Define T_{1,2} = -T_{1,1}*V_1*V_2'*T_{2,2} +* +* Then, we can define the matrix V as +* V = |---| +* |V_1| +* |V_2| +* |---| +* +* So, our product is equivalent to the matrix product +* I - V'*T*V +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{1,2} +* +* Compute T_{1,1} recursively +* + CALL CLARFT(DIRECT, STOREV, N, L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL CLARFT(DIRECT, STOREV, N-L, K-L, V(L+1, L+1), LDV, + $ TAU(L+1), T(L+1, L+1), LDT) + +* +* Compute T_{1,2} +* T_{1,2} = V_{1,2} +* + CALL CLACPY('All', L, K-L, V(1, L+1), LDV, T(1, L+1), LDT) +* +* T_{1,2} = T_{1,2}*V_{2,2}' +* + CALL CTRMM('Right', 
'Upper', 'Conjugate', 'Unit', L, K-L, + $ ONE, V(L+1, L+1), LDV, T(1, L+1), LDT) + +* +* T_{1,2} = V_{1,3}*V_{2,3}' + T_{1,2} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL CGEMM('No transpose', 'Conjugate', L, K-L, N-K, ONE, + $ V(1, K+1), LDV, V(L+1, K+1), LDV, ONE, T(1, L+1), LDT) +* +* At this point, we have that T_{1,2} = V_1*V_2' +* All that is left is to pre and post multiply by -T_{1,1} and T_{2,2} +* respectively. +* +* T_{1,2} = -T_{1,1}*T_{1,2} +* + CALL CTRMM('Left', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T, LDT, T(1, L+1), LDT) + +* +* T_{1,2} = T_{1,2}*T_{2,2} +* + CALL CTRMM('Right', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T(L+1,L+1), LDT, T(1, L+1), LDT) + ELSE IF(QL) THEN +* +* Break V apart into 6 components +* +* V = |---------------| +* |V_{1,1} V_{1,2}| +* |V_{2,1} V_{2,2}| +* |0 V_{3,2}| +* |---------------| +* +* V_{1,1}\in\C^{n-k,k-l} rectangular +* V_{2,1}\in\C^{k-l,k-l} unit upper triangular +* +* V_{1,2}\in\C^{n-k,l} rectangular +* V_{2,2}\in\C^{k-l,l} rectangular +* V_{3,2}\in\C^{l,l} unit upper triangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} 0 | +* |T_{2,1} T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. 
Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\C^{k-l, k-l} non-unit lower triangular +* T_{2,2}\in\C^{l, l} non-unit lower triangular +* T_{2,1}\in\C^{k-l, l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_2*T_{2,2}*V_2')*(I - V_1*T_{1,1}*V_1') +* = I - V_2*T_{2,2}*V_2' - V_1*T_{1,1}*V_1' + V_2*T_{2,2}*V_2'*V_1*T_{1,1}*V_1' +* +* Define T_{2,1} = -T_{2,2}*V_2'*V_1*T_{1,1} +* +* Then, we can define the matrix V as +* V = |-------| +* |V_1 V_2| +* |-------| +* +* So, our product is equivalent to the matrix product +* I - V*T*V' +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{2,1} +* +* Compute T_{1,1} recursively +* + CALL CLARFT(DIRECT, STOREV, N-L, K-L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL CLARFT(DIRECT, STOREV, N, L, V(1, K-L+1), LDV, + $ TAU(K-L+1), T(K-L+1, K-L+1), LDT) +* +* Compute T_{2,1} +* T_{2,1} = V_{2,2}' +* + DO J = 1, K-L + DO I = 1, L + T(K-L+I, J) = CONJG(V(N-K+J, K-L+I)) + END DO END DO - END IF - RETURN * -* End of CLARFT +* T_{2,1} = T_{2,1}*V_{2,1} +* + CALL CTRMM('Right', 'Upper', 'No transpose', 'Unit', L, + $ K-L, ONE, V(N-K+1, 1), LDV, T(K-L+1, 1), LDT) + +* +* T_{2,1} = V_{2,2}'*V_{2,1} + T_{2,1} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL CGEMM('Conjugate', 'No transpose', L, K-L, N-K, ONE, + $ V(1, K-L+1), LDV, V, LDV, ONE, T(K-L+1, 1), + $ LDT) +* +* At this point, we have that T_{2,1} = V_2'*V_1 +* All that is left is to pre and post multiply by -T_{2,2} and T_{1,1} +* respectively. 
+* +* T_{2,1} = -T_{2,2}*T_{2,1} +* + CALL CTRMM('Left', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T(K-L+1, K-L+1), LDT, + $ T(K-L+1, 1), LDT) * - END +* T_{2,1} = T_{2,1}*T_{1,1} +* + CALL CTRMM('Right', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T, LDT, T(K-L+1, 1), LDT) + ELSE +* +* Else means RQ case +* +* Break V apart into 6 components +* +* V = |-----------------------| +* |V_{1,1} V_{1,2} 0 | +* |V_{2,1} V_{2,2} V_{2,3}| +* |-----------------------| +* +* V_{1,1}\in\C^{k-l,n-k} rectangular +* V_{1,2}\in\C^{k-l,k-l} unit lower triangular +* +* V_{2,1}\in\C^{l,n-k} rectangular +* V_{2,2}\in\C^{l,k-l} rectangular +* V_{2,3}\in\C^{l,l} unit lower triangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} 0 | +* |T_{2,1} T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\C^{k-l, k-l} non-unit lower triangular +* T_{2,2}\in\C^{l, l} non-unit lower triangular +* T_{2,1}\in\C^{k-l, l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_2'*T_{2,2}*V_2)*(I - V_1'*T_{1,1}*V_1) +* = I - V_2'*T_{2,2}*V_2 - V_1'*T_{1,1}*V_1 + V_2'*T_{2,2}*V_2*V_1'*T_{1,1}*V_1 +* +* Define T_{2,1} = -T_{2,2}*V_2*V_1'*T_{1,1} +* +* Then, we can define the matrix V as +* V = |---| +* |V_1| +* |V_2| +* |---| +* +* So, our product is equivalent to the matrix product +* I - V'*T*V +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{2,1} +* +* Compute T_{1,1} recursively +* + CALL CLARFT(DIRECT, STOREV, N-L, K-L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL CLARFT(DIRECT, STOREV, N, L, V(K-L+1,1), LDV, + $ TAU(K-L+1), T(K-L+1, K-L+1), LDT) +* +* Compute T_{2,1} +* T_{2,1} = V_{2,2} +* + CALL CLACPY('All', L, K-L, V(K-L+1, N-K+1), LDV, + $ T(K-L+1, 1), 
LDT) + +* +* T_{2,1} = T_{2,1}*V_{1,2}' +* + CALL CTRMM('Right', 'Lower', 'Conjugate', 'Unit', L, K-L, + $ ONE, V(1, N-K+1), LDV, T(K-L+1,1), LDT) + +* +* T_{2,1} = V_{2,1}*V_{1,1}' + T_{2,1} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL CGEMM('No transpose', 'Conjugate', L, K-L, N-K, ONE, + $ V(K-L+1, 1), LDV, V, LDV, ONE, T(K-L+1, 1), + $ LDT) + +* +* At this point, we have that T_{2,1} = V_2*V_1' +* All that is left is to pre and post multiply by -T_{2,2} and T_{1,1} +* respectively. +* +* T_{2,1} = -T_{2,2}*T_{2,1} +* + CALL CTRMM('Left', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T(K-L+1, K-L+1), LDT, + $ T(K-L+1, 1), LDT) + +* +* T_{2,1} = T_{2,1}*T_{1,1} +* + CALL CTRMM('Right', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T, LDT, T(K-L+1, 1), LDT) + END IF + END SUBROUTINE diff --git a/lapack-netlib/SRC/dlarft.f b/lapack-netlib/SRC/dlarft.f index a8d9de61f1..c27bb1a806 100644 --- a/lapack-netlib/SRC/dlarft.f +++ b/lapack-netlib/SRC/dlarft.f @@ -18,7 +18,7 @@ * Definition: * =========== * -* SUBROUTINE DLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* RECURSIVE SUBROUTINE DLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) * * .. Scalar Arguments .. * CHARACTER DIRECT, STOREV @@ -130,7 +130,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \ingroup doubleOTHERauxiliary +*> \ingroup larft * *> \par Further Details: * ===================== @@ -159,165 +159,470 @@ *> \endverbatim *> * ===================================================================== - SUBROUTINE DLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) + RECURSIVE SUBROUTINE DLARFT( DIRECT, STOREV, N, K, V, LDV, + $ TAU, T, LDT ) * * -- LAPACK auxiliary routine -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * -* .. Scalar Arguments .. +* .. Scalar Arguments +* CHARACTER DIRECT, STOREV INTEGER K, LDT, LDV, N * .. * ..
Array Arguments .. +* DOUBLE PRECISION T( LDT, * ), TAU( * ), V( LDV, * ) * .. * -* ===================================================================== -* * .. Parameters .. - DOUBLE PRECISION ONE, ZERO - PARAMETER ( ONE = 1.0D+0, ZERO = 0.0D+0 ) -* .. +* + DOUBLE PRECISION ONE, NEG_ONE, ZERO + PARAMETER(ONE=1.0D+0, ZERO = 0.0D+0, NEG_ONE=-1.0D+0) +* * .. Local Scalars .. - INTEGER I, J, PREVLASTV, LASTV -* .. +* + INTEGER I,J,L + LOGICAL QR,LQ,QL,DIRF,COLV +* * .. External Subroutines .. - EXTERNAL DGEMV, DTRMV -* .. -* .. External Functions .. - LOGICAL LSAME - EXTERNAL LSAME +* + EXTERNAL DTRMM,DGEMM,DLACPY +* +* .. External Functions.. +* + LOGICAL LSAME + EXTERNAL LSAME +* +* The general scheme used is inspired by the approach inside DGEQRT3 +* which was (at the time of writing this code): +* Based on the algorithm of Elmroth and Gustavson, +* IBM J. Res. Develop. Vol 44 No. 4 July 2000. * .. * .. Executable Statements .. * * Quick return if possible * - IF( N.EQ.0 ) - $ RETURN -* - IF( LSAME( DIRECT, 'F' ) ) THEN - PREVLASTV = N - DO I = 1, K - PREVLASTV = MAX( I, PREVLASTV ) - IF( TAU( I ).EQ.ZERO ) THEN -* -* H(i) = I -* - DO J = 1, I - T( J, I ) = ZERO - END DO - ELSE -* -* general case -* - IF( LSAME( STOREV, 'C' ) ) THEN -* Skip any trailing zeros. - DO LASTV = N, I+1, -1 - IF( V( LASTV, I ).NE.ZERO ) EXIT - END DO - DO J = 1, I-1 - T( J, I ) = -TAU( I ) * V( I , J ) - END DO - J = MIN( LASTV, PREVLASTV ) -* -* T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**T * V(i:j,i) -* - CALL DGEMV( 'Transpose', J-I, I-1, -TAU( I ), - $ V( I+1, 1 ), LDV, V( I+1, I ), 1, ONE, - $ T( 1, I ), 1 ) - ELSE -* Skip any trailing zeros. 
- DO LASTV = N, I+1, -1 - IF( V( I, LASTV ).NE.ZERO ) EXIT - END DO - DO J = 1, I-1 - T( J, I ) = -TAU( I ) * V( J , I ) - END DO - J = MIN( LASTV, PREVLASTV ) -* -* T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**T -* - CALL DGEMV( 'No transpose', I-1, J-I, -TAU( I ), - $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, ONE, - $ T( 1, I ), 1 ) - END IF -* -* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) -* - CALL DTRMV( 'Upper', 'No transpose', 'Non-unit', I-1, T, - $ LDT, T( 1, I ), 1 ) - T( I, I ) = TAU( I ) - IF( I.GT.1 ) THEN - PREVLASTV = MAX( PREVLASTV, LASTV ) - ELSE - PREVLASTV = LASTV - END IF - END IF + IF(N.EQ.0.OR.K.EQ.0) THEN + RETURN + END IF +* +* Base case +* + IF(N.EQ.1.OR.K.EQ.1) THEN + T(1,1) = TAU(1) + RETURN + END IF +* +* Beginning of executable statements +* + L = K / 2 +* +* Determine what kind of Q we need to compute +* We assume that if the user doesn't provide 'F' for DIRECT, +* then they meant to provide 'B' and if they don't provide +* 'C' for STOREV, then they meant to provide 'R' +* + DIRF = LSAME(DIRECT,'F') + COLV = LSAME(STOREV,'C') +* +* QR happens when we have forward direction in column storage +* + QR = DIRF.AND.COLV +* +* LQ happens when we have forward direction in row storage +* + LQ = DIRF.AND.(.NOT.COLV) +* +* QL happens when we have backward direction in column storage +* + QL = (.NOT.DIRF).AND.COLV +* +* The last case is RQ. 
Due to how we structured this, if the +* above 3 are false, then RQ must be true, so we never store +* this +* RQ happens when we have backward direction in row storage +* RQ = (.NOT.DIRF).AND.(.NOT.COLV) +* + IF(QR) THEN +* +* Break V apart into 6 components +* +* V = |---------------| +* |V_{1,1} 0 | +* |V_{2,1} V_{2,2}| +* |V_{3,1} V_{3,2}| +* |---------------| +* +* V_{1,1}\in\R^{l,l} unit lower triangular +* V_{2,1}\in\R^{k-l,l} rectangular +* V_{3,1}\in\R^{n-k,l} rectangular +* +* V_{2,2}\in\R^{k-l,k-l} unit lower triangular +* V_{3,2}\in\R^{n-k,k-l} rectangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} T_{1,2}| +* |0 T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\R^{l, l} upper triangular +* T_{2,2}\in\R^{k-l, k-l} upper triangular +* T_{1,2}\in\R^{l, k-l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_1*T_{1,1}*V_1')*(I - V_2*T_{2,2}*V_2') +* = I - V_1*T_{1,1}*V_1' - V_2*T_{2,2}*V_2' + V_1*T_{1,1}*V_1'*V_2*T_{2,2}*V_2' +* +* Define T_{1,2} = -T_{1,1}*V_1'*V_2*T_{2,2} +* +* Then, we can define the matrix V as +* V = |-------| +* |V_1 V_2| +* |-------| +* +* So, our product is equivalent to the matrix product +* I - V*T*V' +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{1,2} +* +* Compute T_{1,1} recursively +* + CALL DLARFT(DIRECT, STOREV, N, L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL DLARFT(DIRECT, STOREV, N-L, K-L, V(L+1, L+1), LDV, + $ TAU(L+1), T(L+1, L+1), LDT) +* +* Compute T_{1,2} +* T_{1,2} = V_{2,1}' +* + DO J = 1, L + DO I = 1, K-L + T(J, L+I) = V(L+I, J) + END DO END DO - ELSE - PREVLASTV = 1 - DO I = K, 1, -1 - IF( TAU( I ).EQ.ZERO ) THEN -* -* H(i) = I -* - DO J = I, K - T( J, I ) = ZERO - END DO - ELSE 
-* -* general case -* - IF( I.LT.K ) THEN - IF( LSAME( STOREV, 'C' ) ) THEN -* Skip any leading zeros. - DO LASTV = 1, I-1 - IF( V( LASTV, I ).NE.ZERO ) EXIT - END DO - DO J = I+1, K - T( J, I ) = -TAU( I ) * V( N-K+I , J ) - END DO - J = MAX( LASTV, PREVLASTV ) -* -* T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**T * V(j:n-k+i,i) -* - CALL DGEMV( 'Transpose', N-K+I-J, K-I, -TAU( I ), - $ V( J, I+1 ), LDV, V( J, I ), 1, ONE, - $ T( I+1, I ), 1 ) - ELSE -* Skip any leading zeros. - DO LASTV = 1, I-1 - IF( V( I, LASTV ).NE.ZERO ) EXIT - END DO - DO J = I+1, K - T( J, I ) = -TAU( I ) * V( J, N-K+I ) - END DO - J = MAX( LASTV, PREVLASTV ) -* -* T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**T -* - CALL DGEMV( 'No transpose', K-I, N-K+I-J, - $ -TAU( I ), V( I+1, J ), LDV, V( I, J ), LDV, - $ ONE, T( I+1, I ), 1 ) - END IF -* -* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) -* - CALL DTRMV( 'Lower', 'No transpose', 'Non-unit', K-I, - $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) - IF( I.GT.1 ) THEN - PREVLASTV = MIN( PREVLASTV, LASTV ) - ELSE - PREVLASTV = LASTV - END IF - END IF - T( I, I ) = TAU( I ) - END IF +* +* T_{1,2} = T_{1,2}*V_{2,2} +* + CALL DTRMM('Right', 'Lower', 'No transpose', 'Unit', L, + $ K-L, ONE, V(L+1, L+1), LDV, T(1, L+1), LDT) + +* +* T_{1,2} = V_{3,1}'*V_{3,2} + T_{1,2} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL DGEMM('Transpose', 'No transpose', L, K-L, N-K, ONE, + $ V(K+1, 1), LDV, V(K+1, L+1), LDV, ONE, + $ T(1, L+1), LDT) +* +* At this point, we have that T_{1,2} = V_1'*V_2 +* All that is left is to pre and post multiply by -T_{1,1} and T_{2,2} +* respectively. 
+* +* T_{1,2} = -T_{1,1}*T_{1,2} +* + CALL DTRMM('Left', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T, LDT, T(1, L+1), LDT) +* +* T_{1,2} = T_{1,2}*T_{2,2} +* + CALL DTRMM('Right', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T(L+1, L+1), LDT, T(1, L+1), LDT) + + ELSE IF(LQ) THEN +* +* Break V apart into 6 components +* +* V = |----------------------| +* |V_{1,1} V_{1,2} V{1,3}| +* |0 V_{2,2} V{2,3}| +* |----------------------| +* +* V_{1,1}\in\R^{l,l} unit upper triangular +* V_{1,2}\in\R^{l,k-l} rectangular +* V_{1,3}\in\R^{l,n-k} rectangular +* +* V_{2,2}\in\R^{k-l,k-l} unit upper triangular +* V_{2,3}\in\R^{k-l,n-k} rectangular +* +* Where l = floor(k/2) +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} T_{1,2}| +* |0 T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\R^{l, l} upper triangular +* T_{2,2}\in\R^{k-l, k-l} upper triangular +* T_{1,2}\in\R^{l, k-l} rectangular +* +* Then, consider the product: +* +* (I - V_1'*T_{1,1}*V_1)*(I - V_2'*T_{2,2}*V_2) +* = I - V_1'*T_{1,1}*V_1 - V_2'*T_{2,2}*V_2 + V_1'*T_{1,1}*V_1*V_2'*T_{2,2}*V_2 +* +* Define T_{1,2} = -T_{1,1}*V_1*V_2'*T_{2,2} +* +* Then, we can define the matrix V as +* V = |---| +* |V_1| +* |V_2| +* |---| +* +* So, our product is equivalent to the matrix product +* I - V'*T*V +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{1,2} +* +* Compute T_{1,1} recursively +* + CALL DLARFT(DIRECT, STOREV, N, L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL DLARFT(DIRECT, STOREV, N-L, K-L, V(L+1, L+1), LDV, + $ TAU(L+1), T(L+1, L+1), LDT) + +* +* Compute T_{1,2} +* T_{1,2} = V_{1,2} +* + CALL DLACPY('All', L, K-L, V(1, L+1), LDV, T(1, L+1), LDT) +* +* T_{1,2} = T_{1,2}*V_{2,2}' +* + CALL DTRMM('Right', 
'Upper', 'Transpose', 'Unit', L, K-L, + $ ONE, V(L+1, L+1), LDV, T(1, L+1), LDT) + +* +* T_{1,2} = V_{1,3}*V_{2,3}' + T_{1,2} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL DGEMM('No transpose', 'Transpose', L, K-L, N-K, ONE, + $ V(1, K+1), LDV, V(L+1, K+1), LDV, ONE, + $ T(1, L+1), LDT) +* +* At this point, we have that T_{1,2} = V_1*V_2' +* All that is left is to pre and post multiply by -T_{1,1} and T_{2,2} +* respectively. +* +* T_{1,2} = -T_{1,1}*T_{1,2} +* + CALL DTRMM('Left', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T, LDT, T(1, L+1), LDT) + +* +* T_{1,2} = T_{1,2}*T_{2,2} +* + CALL DTRMM('Right', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T(L+1, L+1), LDT, T(1, L+1), LDT) + ELSE IF(QL) THEN +* +* Break V apart into 6 components +* +* V = |---------------| +* |V_{1,1} V_{1,2}| +* |V_{2,1} V_{2,2}| +* |0 V_{3,2}| +* |---------------| +* +* V_{1,1}\in\R^{n-k,k-l} rectangular +* V_{2,1}\in\R^{k-l,k-l} unit upper triangular +* +* V_{1,2}\in\R^{n-k,l} rectangular +* V_{2,2}\in\R^{k-l,l} rectangular +* V_{3,2}\in\R^{l,l} unit upper triangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} 0 | +* |T_{2,1} T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. 
Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\R^{k-l, k-l} non-unit lower triangular +* T_{2,2}\in\R^{l, l} non-unit lower triangular +* T_{2,1}\in\R^{k-l, l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_2*T_{2,2}*V_2')*(I - V_1*T_{1,1}*V_1') +* = I - V_2*T_{2,2}*V_2' - V_1*T_{1,1}*V_1' + V_2*T_{2,2}*V_2'*V_1*T_{1,1}*V_1' +* +* Define T_{2,1} = -T_{2,2}*V_2'*V_1*T_{1,1} +* +* Then, we can define the matrix V as +* V = |-------| +* |V_1 V_2| +* |-------| +* +* So, our product is equivalent to the matrix product +* I - V*T*V' +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{2,1} +* +* Compute T_{1,1} recursively +* + CALL DLARFT(DIRECT, STOREV, N-L, K-L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL DLARFT(DIRECT, STOREV, N, L, V(1, K-L+1), LDV, + $ TAU(K-L+1), T(K-L+1, K-L+1), LDT) +* +* Compute T_{2,1} +* T_{2,1} = V_{2,2}' +* + DO J = 1, K-L + DO I = 1, L + T(K-L+I, J) = V(N-K+J, K-L+I) + END DO END DO - END IF - RETURN * -* End of DLARFT +* T_{2,1} = T_{2,1}*V_{2,1} +* + CALL DTRMM('Right', 'Upper', 'No transpose', 'Unit', L, + $ K-L, ONE, V(N-K+1, 1), LDV, T(K-L+1, 1), LDT) + +* +* T_{2,1} = V_{2,2}'*V_{2,1} + T_{2,1} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL DGEMM('Transpose', 'No transpose', L, K-L, N-K, ONE, + $ V(1, K-L+1), LDV, V, LDV, ONE, T(K-L+1, 1), + $ LDT) +* +* At this point, we have that T_{2,1} = V_2'*V_1 +* All that is left is to pre and post multiply by -T_{2,2} and T_{1,1} +* respectively. 
+* +* T_{2,1} = -T_{2,2}*T_{2,1} +* + CALL DTRMM('Left', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T(K-L+1, K-L+1), LDT, + $ T(K-L+1, 1), LDT) * - END +* T_{2,1} = T_{2,1}*T_{1,1} +* + CALL DTRMM('Right', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T, LDT, T(K-L+1, 1), LDT) + ELSE +* +* Else means RQ case +* +* Break V apart into 6 components +* +* V = |-----------------------| +* |V_{1,1} V_{1,2} 0 | +* |V_{2,1} V_{2,2} V_{2,3}| +* |-----------------------| +* +* V_{1,1}\in\R^{k-l,n-k} rectangular +* V_{1,2}\in\R^{k-l,k-l} unit lower triangular +* +* V_{2,1}\in\R^{l,n-k} rectangular +* V_{2,2}\in\R^{l,k-l} rectangular +* V_{2,3}\in\R^{l,l} unit lower triangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} 0 | +* |T_{2,1} T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\R^{k-l, k-l} non-unit lower triangular +* T_{2,2}\in\R^{l, l} non-unit lower triangular +* T_{2,1}\in\R^{k-l, l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_2'*T_{2,2}*V_2)*(I - V_1'*T_{1,1}*V_1) +* = I - V_2'*T_{2,2}*V_2 - V_1'*T_{1,1}*V_1 + V_2'*T_{2,2}*V_2*V_1'*T_{1,1}*V_1 +* +* Define T_{2,1} = -T_{2,2}*V_2*V_1'*T_{1,1} +* +* Then, we can define the matrix V as +* V = |---| +* |V_1| +* |V_2| +* |---| +* +* So, our product is equivalent to the matrix product +* I - V'*T*V +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{2,1} +* +* Compute T_{1,1} recursively +* + CALL DLARFT(DIRECT, STOREV, N-L, K-L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL DLARFT(DIRECT, STOREV, N, L, V(K-L+1, 1), LDV, + $ TAU(K-L+1), T(K-L+1, K-L+1), LDT) +* +* Compute T_{2,1} +* T_{2,1} = V_{2,2} +* + CALL DLACPY('All', L, K-L, V(K-L+1, N-K+1), LDV, + $ T(K-L+1, 
1), LDT) + +* +* T_{2,1} = T_{2,1}*V_{1,2}' +* + CALL DTRMM('Right', 'Lower', 'Transpose', 'Unit', L, K-L, + $ ONE, V(1, N-K+1), LDV, T(K-L+1, 1), LDT) + +* +* T_{2,1} = V_{2,1}*V_{1,1}' + T_{2,1} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL DGEMM('No transpose', 'Transpose', L, K-L, N-K, ONE, + $ V(K-L+1, 1), LDV, V, LDV, ONE, T(K-L+1, 1), + $ LDT) + +* +* At this point, we have that T_{2,1} = V_2*V_1' +* All that is left is to pre and post multiply by -T_{2,2} and T_{1,1} +* respectively. +* +* T_{2,1} = -T_{2,2}*T_{2,1} +* + CALL DTRMM('Left', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T(K-L+1, K-L+1), LDT, + $ T(K-L+1, 1), LDT) + +* +* T_{2,1} = T_{2,1}*T_{1,1} +* + CALL DTRMM('Right', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T, LDT, T(K-L+1, 1), LDT) + END IF + END SUBROUTINE diff --git a/lapack-netlib/SRC/slarft.f b/lapack-netlib/SRC/slarft.f index 9cfe0ad3f9..ad3a4d924c 100644 --- a/lapack-netlib/SRC/slarft.f +++ b/lapack-netlib/SRC/slarft.f @@ -18,7 +18,7 @@ * Definition: * =========== * -* SUBROUTINE SLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* RECURSIVE SUBROUTINE SLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) * * .. Scalar Arguments .. * CHARACTER DIRECT, STOREV @@ -127,10 +127,10 @@ * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \ingroup realOTHERauxiliary +*> \ingroup larft * *> \par Further Details: * ===================== @@ -159,165 +159,470 @@ *> \endverbatim *> * ===================================================================== - SUBROUTINE SLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) + RECURSIVE SUBROUTINE SLARFT( DIRECT, STOREV, N, K, V, LDV, + $ TAU, T, LDT ) * * -- LAPACK auxiliary routine -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ.
of Colorado Denver and NAG Ltd..-- * -* .. Scalar Arguments .. +* .. Scalar Arguments +* CHARACTER DIRECT, STOREV INTEGER K, LDT, LDV, N * .. * .. Array Arguments .. +* REAL T( LDT, * ), TAU( * ), V( LDV, * ) * .. * -* ===================================================================== -* * .. Parameters .. - REAL ONE, ZERO - PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 ) -* .. +* + REAL ONE, NEG_ONE, ZERO + PARAMETER(ONE=1.0E+0, ZERO = 0.0E+0, NEG_ONE=-1.0E+0) +* * .. Local Scalars .. - INTEGER I, J, PREVLASTV, LASTV -* .. +* + INTEGER I,J,L + LOGICAL QR,LQ,QL,DIRF,COLV +* * .. External Subroutines .. - EXTERNAL SGEMV, STRMV -* .. -* .. External Functions .. +* + EXTERNAL STRMM,SGEMM,SLACPY +* +* .. External Functions.. +* LOGICAL LSAME EXTERNAL LSAME +* +* The general scheme used is inspired by the approach inside DGEQRT3 +* which was (at the time of writing this code): +* Based on the algorithm of Elmroth and Gustavson, +* IBM J. Res. Develop. Vol 44 No. 4 July 2000. * .. * .. Executable Statements .. * * Quick return if possible * - IF( N.EQ.0 ) - $ RETURN -* - IF( LSAME( DIRECT, 'F' ) ) THEN - PREVLASTV = N - DO I = 1, K - PREVLASTV = MAX( I, PREVLASTV ) - IF( TAU( I ).EQ.ZERO ) THEN -* -* H(i) = I -* - DO J = 1, I - T( J, I ) = ZERO - END DO - ELSE -* -* general case -* - IF( LSAME( STOREV, 'C' ) ) THEN -* Skip any trailing zeros. - DO LASTV = N, I+1, -1 - IF( V( LASTV, I ).NE.ZERO ) EXIT - END DO - DO J = 1, I-1 - T( J, I ) = -TAU( I ) * V( I , J ) - END DO - J = MIN( LASTV, PREVLASTV ) -* -* T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**T * V(i:j,i) -* - CALL SGEMV( 'Transpose', J-I, I-1, -TAU( I ), - $ V( I+1, 1 ), LDV, V( I+1, I ), 1, ONE, - $ T( 1, I ), 1 ) - ELSE -* Skip any trailing zeros. 
- DO LASTV = N, I+1, -1 - IF( V( I, LASTV ).NE.ZERO ) EXIT - END DO - DO J = 1, I-1 - T( J, I ) = -TAU( I ) * V( J , I ) - END DO - J = MIN( LASTV, PREVLASTV ) -* -* T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**T -* - CALL SGEMV( 'No transpose', I-1, J-I, -TAU( I ), - $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, - $ ONE, T( 1, I ), 1 ) - END IF -* -* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) -* - CALL STRMV( 'Upper', 'No transpose', 'Non-unit', I-1, T, - $ LDT, T( 1, I ), 1 ) - T( I, I ) = TAU( I ) - IF( I.GT.1 ) THEN - PREVLASTV = MAX( PREVLASTV, LASTV ) - ELSE - PREVLASTV = LASTV - END IF - END IF + IF(N.EQ.0.OR.K.EQ.0) THEN + RETURN + END IF +* +* Base case +* + IF(N.EQ.1.OR.K.EQ.1) THEN + T(1,1) = TAU(1) + RETURN + END IF +* +* Beginning of executable statements +* + L = K / 2 +* +* Determine what kind of Q we need to compute +* We assume that if the user doesn't provide 'F' for DIRECT, +* then they meant to provide 'B' and if they don't provide +* 'C' for STOREV, then they meant to provide 'R' +* + DIRF = LSAME(DIRECT,'F') + COLV = LSAME(STOREV,'C') +* +* QR happens when we have forward direction in column storage +* + QR = DIRF.AND.COLV +* +* LQ happens when we have forward direction in row storage +* + LQ = DIRF.AND.(.NOT.COLV) +* +* QL happens when we have backward direction in column storage +* + QL = (.NOT.DIRF).AND.COLV +* +* The last case is RQ. 
Due to how we structured this, if the +* above 3 are false, then RQ must be true, so we never store +* this +* RQ happens when we have backward direction in row storage +* RQ = (.NOT.DIRF).AND.(.NOT.COLV) +* + IF(QR) THEN +* +* Break V apart into 6 components +* +* V = |---------------| +* |V_{1,1} 0 | +* |V_{2,1} V_{2,2}| +* |V_{3,1} V_{3,2}| +* |---------------| +* +* V_{1,1}\in\R^{l,l} unit lower triangular +* V_{2,1}\in\R^{k-l,l} rectangular +* V_{3,1}\in\R^{n-k,l} rectangular +* +* V_{2,2}\in\R^{k-l,k-l} unit lower triangular +* V_{3,2}\in\R^{n-k,k-l} rectangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} T_{1,2}| +* |0 T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\R^{l, l} upper triangular +* T_{2,2}\in\R^{k-l, k-l} upper triangular +* T_{1,2}\in\R^{l, k-l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_1*T_{1,1}*V_1')*(I - V_2*T_{2,2}*V_2') +* = I - V_1*T_{1,1}*V_1' - V_2*T_{2,2}*V_2' + V_1*T_{1,1}*V_1'*V_2*T_{2,2}*V_2' +* +* Define T_{1,2} = -T_{1,1}*V_1'*V_2*T_{2,2} +* +* Then, we can define the matrix V as +* V = |-------| +* |V_1 V_2| +* |-------| +* +* So, our product is equivalent to the matrix product +* I - V*T*V' +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{1,2} +* +* Compute T_{1,1} recursively +* + CALL SLARFT(DIRECT, STOREV, N, L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL SLARFT(DIRECT, STOREV, N-L, K-L, V(L+1, L+1), LDV, + $ TAU(L+1), T(L+1, L+1), LDT) +* +* Compute T_{1,2} +* T_{1,2} = V_{2,1}' +* + DO J = 1, L + DO I = 1, K-L + T(J, L+I) = V(L+I, J) + END DO END DO - ELSE - PREVLASTV = 1 - DO I = K, 1, -1 - IF( TAU( I ).EQ.ZERO ) THEN -* -* H(i) = I -* - DO J = I, K - T( J, I ) = ZERO - END DO - ELSE 
-* -* general case -* - IF( I.LT.K ) THEN - IF( LSAME( STOREV, 'C' ) ) THEN -* Skip any leading zeros. - DO LASTV = 1, I-1 - IF( V( LASTV, I ).NE.ZERO ) EXIT - END DO - DO J = I+1, K - T( J, I ) = -TAU( I ) * V( N-K+I , J ) - END DO - J = MAX( LASTV, PREVLASTV ) -* -* T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**T * V(j:n-k+i,i) -* - CALL SGEMV( 'Transpose', N-K+I-J, K-I, -TAU( I ), - $ V( J, I+1 ), LDV, V( J, I ), 1, ONE, - $ T( I+1, I ), 1 ) - ELSE -* Skip any leading zeros. - DO LASTV = 1, I-1 - IF( V( I, LASTV ).NE.ZERO ) EXIT - END DO - DO J = I+1, K - T( J, I ) = -TAU( I ) * V( J, N-K+I ) - END DO - J = MAX( LASTV, PREVLASTV ) -* -* T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**T -* - CALL SGEMV( 'No transpose', K-I, N-K+I-J, - $ -TAU( I ), V( I+1, J ), LDV, V( I, J ), LDV, - $ ONE, T( I+1, I ), 1 ) - END IF -* -* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) -* - CALL STRMV( 'Lower', 'No transpose', 'Non-unit', K-I, - $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) - IF( I.GT.1 ) THEN - PREVLASTV = MIN( PREVLASTV, LASTV ) - ELSE - PREVLASTV = LASTV - END IF - END IF - T( I, I ) = TAU( I ) - END IF +* +* T_{1,2} = T_{1,2}*V_{2,2} +* + CALL STRMM('Right', 'Lower', 'No transpose', 'Unit', L, + $ K-L, ONE, V(L+1, L+1), LDV, T(1, L+1), LDT) + +* +* T_{1,2} = V_{3,1}'*V_{3,2} + T_{1,2} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL SGEMM('Transpose', 'No transpose', L, K-L, N-K, ONE, + $ V(K+1, 1), LDV, V(K+1, L+1), LDV, ONE, + $ T(1, L+1), LDT) +* +* At this point, we have that T_{1,2} = V_1'*V_2 +* All that is left is to pre and post multiply by -T_{1,1} and T_{2,2} +* respectively. 
+* +* T_{1,2} = -T_{1,1}*T_{1,2} +* + CALL STRMM('Left', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T, LDT, T(1, L+1), LDT) +* +* T_{1,2} = T_{1,2}*T_{2,2} +* + CALL STRMM('Right', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T(L+1, L+1), LDT, T(1, L+1), LDT) + + ELSE IF(LQ) THEN +* +* Break V apart into 6 components +* +* V = |----------------------| +* |V_{1,1} V_{1,2} V{1,3}| +* |0 V_{2,2} V{2,3}| +* |----------------------| +* +* V_{1,1}\in\R^{l,l} unit upper triangular +* V_{1,2}\in\R^{l,k-l} rectangular +* V_{1,3}\in\R^{l,n-k} rectangular +* +* V_{2,2}\in\R^{k-l,k-l} unit upper triangular +* V_{2,3}\in\R^{k-l,n-k} rectangular +* +* Where l = floor(k/2) +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} T_{1,2}| +* |0 T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\R^{l, l} upper triangular +* T_{2,2}\in\R^{k-l, k-l} upper triangular +* T_{1,2}\in\R^{l, k-l} rectangular +* +* Then, consider the product: +* +* (I - V_1'*T_{1,1}*V_1)*(I - V_2'*T_{2,2}*V_2) +* = I - V_1'*T_{1,1}*V_1 - V_2'*T_{2,2}*V_2 + V_1'*T_{1,1}*V_1*V_2'*T_{2,2}*V_2 +* +* Define T_{1,2} = -T_{1,1}*V_1*V_2'*T_{2,2} +* +* Then, we can define the matrix V as +* V = |---| +* |V_1| +* |V_2| +* |---| +* +* So, our product is equivalent to the matrix product +* I - V'*T*V +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{1,2} +* +* Compute T_{1,1} recursively +* + CALL SLARFT(DIRECT, STOREV, N, L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL SLARFT(DIRECT, STOREV, N-L, K-L, V(L+1, L+1), LDV, + $ TAU(L+1), T(L+1, L+1), LDT) + +* +* Compute T_{1,2} +* T_{1,2} = V_{1,2} +* + CALL SLACPY('All', L, K-L, V(1, L+1), LDV, T(1, L+1), LDT) +* +* T_{1,2} = T_{1,2}*V_{2,2}' +* + CALL STRMM('Right', 
'Upper', 'Transpose', 'Unit', L, K-L, + $ ONE, V(L+1, L+1), LDV, T(1, L+1), LDT) + +* +* T_{1,2} = V_{1,3}*V_{2,3}' + T_{1,2} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL SGEMM('No transpose', 'Transpose', L, K-L, N-K, ONE, + $ V(1, K+1), LDV, V(L+1, K+1), LDV, ONE, + $ T(1, L+1), LDT) +* +* At this point, we have that T_{1,2} = V_1*V_2' +* All that is left is to pre and post multiply by -T_{1,1} and T_{2,2} +* respectively. +* +* T_{1,2} = -T_{1,1}*T_{1,2} +* + CALL STRMM('Left', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T, LDT, T(1, L+1), LDT) + +* +* T_{1,2} = T_{1,2}*T_{2,2} +* + CALL STRMM('Right', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T(L+1, L+1), LDT, T(1, L+1), LDT) + ELSE IF(QL) THEN +* +* Break V apart into 6 components +* +* V = |---------------| +* |V_{1,1} V_{1,2}| +* |V_{2,1} V_{2,2}| +* |0 V_{3,2}| +* |---------------| +* +* V_{1,1}\in\R^{n-k,k-l} rectangular +* V_{2,1}\in\R^{k-l,k-l} unit upper triangular +* +* V_{1,2}\in\R^{n-k,l} rectangular +* V_{2,2}\in\R^{k-l,l} rectangular +* V_{3,2}\in\R^{l,l} unit upper triangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} 0 | +* |T_{2,1} T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. 
Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\R^{k-l, k-l} non-unit lower triangular +* T_{2,2}\in\R^{l, l} non-unit lower triangular +* T_{2,1}\in\R^{k-l, l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_2*T_{2,2}*V_2')*(I - V_1*T_{1,1}*V_1') +* = I - V_2*T_{2,2}*V_2' - V_1*T_{1,1}*V_1' + V_2*T_{2,2}*V_2'*V_1*T_{1,1}*V_1' +* +* Define T_{2,1} = -T_{2,2}*V_2'*V_1*T_{1,1} +* +* Then, we can define the matrix V as +* V = |-------| +* |V_1 V_2| +* |-------| +* +* So, our product is equivalent to the matrix product +* I - V*T*V' +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{2,1} +* +* Compute T_{1,1} recursively +* + CALL SLARFT(DIRECT, STOREV, N-L, K-L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL SLARFT(DIRECT, STOREV, N, L, V(1, K-L+1), LDV, + $ TAU(K-L+1), T(K-L+1, K-L+1), LDT) +* +* Compute T_{2,1} +* T_{2,1} = V_{2,2}' +* + DO J = 1, K-L + DO I = 1, L + T(K-L+I, J) = V(N-K+J, K-L+I) + END DO END DO - END IF - RETURN * -* End of SLARFT +* T_{2,1} = T_{2,1}*V_{2,1} +* + CALL STRMM('Right', 'Upper', 'No transpose', 'Unit', L, + $ K-L, ONE, V(N-K+1, 1), LDV, T(K-L+1, 1), LDT) + +* +* T_{2,1} = V_{2,2}'*V_{2,1} + T_{2,1} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL SGEMM('Transpose', 'No transpose', L, K-L, N-K, ONE, + $ V(1, K-L+1), LDV, V, LDV, ONE, T(K-L+1, 1), + $ LDT) +* +* At this point, we have that T_{2,1} = V_2'*V_1 +* All that is left is to pre and post multiply by -T_{2,2} and T_{1,1} +* respectively. 
+* +* T_{2,1} = -T_{2,2}*T_{2,1} +* + CALL STRMM('Left', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T(K-L+1, K-L+1), LDT, + $ T(K-L+1, 1), LDT) * - END +* T_{2,1} = T_{2,1}*T_{1,1} +* + CALL STRMM('Right', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T, LDT, T(K-L+1, 1), LDT) + ELSE +* +* Else means RQ case +* +* Break V apart into 6 components +* +* V = |-----------------------| +* |V_{1,1} V_{1,2} 0 | +* |V_{2,1} V_{2,2} V_{2,3}| +* |-----------------------| +* +* V_{1,1}\in\R^{k-l,n-k} rectangular +* V_{1,2}\in\R^{k-l,k-l} unit lower triangular +* +* V_{2,1}\in\R^{l,n-k} rectangular +* V_{2,2}\in\R^{l,k-l} rectangular +* V_{2,3}\in\R^{l,l} unit lower triangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} 0 | +* |T_{2,1} T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\R^{k-l, k-l} non-unit lower triangular +* T_{2,2}\in\R^{l, l} non-unit lower triangular +* T_{2,1}\in\R^{k-l, l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_2'*T_{2,2}*V_2)*(I - V_1'*T_{1,1}*V_1) +* = I - V_2'*T_{2,2}*V_2 - V_1'*T_{1,1}*V_1 + V_2'*T_{2,2}*V_2*V_1'*T_{1,1}*V_1 +* +* Define T_{2,1} = -T_{2,2}*V_2*V_1'*T_{1,1} +* +* Then, we can define the matrix V as +* V = |---| +* |V_1| +* |V_2| +* |---| +* +* So, our product is equivalent to the matrix product +* I - V'TV +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{2,1} +* +* Compute T_{1,1} recursively +* + CALL SLARFT(DIRECT, STOREV, N-L, K-L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL SLARFT(DIRECT, STOREV, N, L, V(K-L+1, 1), LDV, + $ TAU(K-L+1), T(K-L+1, K-L+1), LDT) +* +* Compute T_{2,1} +* T_{2,1} = V_{2,2} +* + CALL SLACPY('All', L, K-L, V(K-L+1, N-K+1), LDV, + $ T(K-L+1, 1), 
LDT) + +* +* T_{2,1} = T_{2,1}*V_{1,2}' +* + CALL STRMM('Right', 'Lower', 'Transpose', 'Unit', L, K-L, + $ ONE, V(1, N-K+1), LDV, T(K-L+1, 1), LDT) + +* +* T_{2,1} = V_{2,1}*V_{1,1}' + T_{2,1} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL SGEMM('No transpose', 'Transpose', L, K-L, N-K, ONE, + $ V(K-L+1, 1), LDV, V, LDV, ONE, T(K-L+1, 1), + $ LDT) + +* +* At this point, we have that T_{2,1} = V_2*V_1' +* All that is left is to pre and post multiply by -T_{2,2} and T_{1,1} +* respectively. +* +* T_{2,1} = -T_{2,2}*T_{2,1} +* + CALL STRMM('Left', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T(K-L+1, K-L+1), LDT, + $ T(K-L+1, 1), LDT) + +* +* T_{2,1} = T_{2,1}*T_{1,1} +* + CALL STRMM('Right', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T, LDT, T(K-L+1, 1), LDT) + END IF + END SUBROUTINE diff --git a/lapack-netlib/SRC/zlarft.f b/lapack-netlib/SRC/zlarft.f index 5ad0996fab..900795afad 100644 --- a/lapack-netlib/SRC/zlarft.f +++ b/lapack-netlib/SRC/zlarft.f @@ -18,7 +18,7 @@ * Definition: * =========== * -* SUBROUTINE ZLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) +* RECURSIVE SUBROUTINE ZLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) * * .. Scalar Arguments .. * CHARACTER DIRECT, STOREV @@ -130,7 +130,7 @@ *> \author Univ. of Colorado Denver *> \author NAG Ltd. * -*> \ingroup complex16OTHERauxiliary +*> \ingroup larft * *> \par Further Details: * ===================== @@ -159,166 +159,474 @@ *> \endverbatim *> * ===================================================================== - SUBROUTINE ZLARFT( DIRECT, STOREV, N, K, V, LDV, TAU, T, LDT ) + RECURSIVE SUBROUTINE ZLARFT( DIRECT, STOREV, N, K, V, LDV, + $ TAU, T, LDT ) * * -- LAPACK auxiliary routine -- * -- LAPACK is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * -* .. 
Scalar Arguments +* + CHARACTER DIRECT, STOREV + INTEGER K, LDT, LDV, N * .. * .. Array Arguments .. - COMPLEX*16 T( LDT, * ), TAU( * ), V( LDV, * ) -* .. * -* ===================================================================== + COMPLEX*16 T( LDT, * ), TAU( * ), V( LDV, * ) +* .. * * .. Parameters .. - COMPLEX*16 ONE, ZERO - PARAMETER ( ONE = ( 1.0D+0, 0.0D+0 ), - $ ZERO = ( 0.0D+0, 0.0D+0 ) ) -* .. +* + COMPLEX*16 ONE, NEG_ONE, ZERO + PARAMETER(ONE=1.0D+0, ZERO = 0.0D+0, NEG_ONE=-1.0D+0) +* * .. Local Scalars .. - INTEGER I, J, PREVLASTV, LASTV -* .. +* + INTEGER I,J,L + LOGICAL QR,LQ,QL,DIRF,COLV +* * .. External Subroutines .. - EXTERNAL ZGEMV, ZTRMV, ZGEMM -* .. -* .. External Functions .. - LOGICAL LSAME - EXTERNAL LSAME +* + EXTERNAL ZTRMM,ZGEMM,ZLACPY +* +* .. External Functions.. +* + LOGICAL LSAME + EXTERNAL LSAME +* +* .. Intrinsic Functions.. +* + INTRINSIC CONJG +* +* The general scheme used is inspired by the approach inside DGEQRT3 +* which was (at the time of writing this code): +* Based on the algorithm of Elmroth and Gustavson, +* IBM J. Res. Develop. Vol 44 No. 4 July 2000. * .. * .. Executable Statements .. * * Quick return if possible * - IF( N.EQ.0 ) - $ RETURN -* - IF( LSAME( DIRECT, 'F' ) ) THEN - PREVLASTV = N - DO I = 1, K - PREVLASTV = MAX( PREVLASTV, I ) - IF( TAU( I ).EQ.ZERO ) THEN -* -* H(i) = I -* - DO J = 1, I - T( J, I ) = ZERO - END DO - ELSE -* -* general case -* - IF( LSAME( STOREV, 'C' ) ) THEN -* Skip any trailing zeros. - DO LASTV = N, I+1, -1 - IF( V( LASTV, I ).NE.ZERO ) EXIT - END DO - DO J = 1, I-1 - T( J, I ) = -TAU( I ) * CONJG( V( I , J ) ) - END DO - J = MIN( LASTV, PREVLASTV ) -* -* T(1:i-1,i) := - tau(i) * V(i:j,1:i-1)**H * V(i:j,i) -* - CALL ZGEMV( 'Conjugate transpose', J-I, I-1, - $ -TAU( I ), V( I+1, 1 ), LDV, - $ V( I+1, I ), 1, ONE, T( 1, I ), 1 ) - ELSE -* Skip any trailing zeros. 
- DO LASTV = N, I+1, -1 - IF( V( I, LASTV ).NE.ZERO ) EXIT - END DO - DO J = 1, I-1 - T( J, I ) = -TAU( I ) * V( J , I ) - END DO - J = MIN( LASTV, PREVLASTV ) -* -* T(1:i-1,i) := - tau(i) * V(1:i-1,i:j) * V(i,i:j)**H -* - CALL ZGEMM( 'N', 'C', I-1, 1, J-I, -TAU( I ), - $ V( 1, I+1 ), LDV, V( I, I+1 ), LDV, - $ ONE, T( 1, I ), LDT ) - END IF -* -* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) -* - CALL ZTRMV( 'Upper', 'No transpose', 'Non-unit', I-1, T, - $ LDT, T( 1, I ), 1 ) - T( I, I ) = TAU( I ) - IF( I.GT.1 ) THEN - PREVLASTV = MAX( PREVLASTV, LASTV ) - ELSE - PREVLASTV = LASTV - END IF - END IF + IF(N.EQ.0.OR.K.EQ.0) THEN + RETURN + END IF +* +* Base case +* + IF(N.EQ.1.OR.K.EQ.1) THEN + T(1,1) = TAU(1) + RETURN + END IF +* +* Beginning of executable statements +* + L = K / 2 +* +* Determine what kind of Q we need to compute +* We assume that if the user doesn't provide 'F' for DIRECT, +* then they meant to provide 'B' and if they don't provide +* 'C' for STOREV, then they meant to provide 'R' +* + DIRF = LSAME(DIRECT,'F') + COLV = LSAME(STOREV,'C') +* +* QR happens when we have forward direction in column storage +* + QR = DIRF.AND.COLV +* +* LQ happens when we have forward direction in row storage +* + LQ = DIRF.AND.(.NOT.COLV) +* +* QL happens when we have backward direction in column storage +* + QL = (.NOT.DIRF).AND.COLV +* +* The last case is RQ. 
Due to how we structured this, if the +* above 3 are false, then RQ must be true, so we never store +* this +* RQ happens when we have backward direction in row storage +* RQ = (.NOT.DIRF).AND.(.NOT.COLV) +* + IF(QR) THEN +* +* Break V apart into 6 components +* +* V = |---------------| +* |V_{1,1} 0 | +* |V_{2,1} V_{2,2}| +* |V_{3,1} V_{3,2}| +* |---------------| +* +* V_{1,1}\in\C^{l,l} unit lower triangular +* V_{2,1}\in\C^{k-l,l} rectangular +* V_{3,1}\in\C^{n-k,l} rectangular +* +* V_{2,2}\in\C^{k-l,k-l} unit lower triangular +* V_{3,2}\in\C^{n-k,k-l} rectangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} T_{1,2}| +* |0 T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\C^{l, l} upper triangular +* T_{2,2}\in\C^{k-l, k-l} upper triangular +* T_{1,2}\in\C^{l, k-l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_1*T_{1,1}*V_1')*(I - V_2*T_{2,2}*V_2') +* = I - V_1*T_{1,1}*V_1' - V_2*T_{2,2}*V_2' + V_1*T_{1,1}*V_1'*V_2*T_{2,2}*V_2' +* +* Define T_{1,2} = -T_{1,1}*V_1'*V_2*T_{2,2} +* +* Then, we can define the matrix V as +* V = |-------| +* |V_1 V_2| +* |-------| +* +* So, our product is equivalent to the matrix product +* I - V*T*V' +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{1,2} +* +* Compute T_{1,1} recursively +* + CALL ZLARFT(DIRECT, STOREV, N, L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL ZLARFT(DIRECT, STOREV, N-L, K-L, V(L+1, L+1), LDV, + $ TAU(L+1), T(L+1, L+1), LDT) +* +* Compute T_{1,2} +* T_{1,2} = V_{2,1}' +* + DO J = 1, L + DO I = 1, K-L + T(J, L+I) = CONJG(V(L+I, J)) + END DO END DO - ELSE - PREVLASTV = 1 - DO I = K, 1, -1 - IF( TAU( I ).EQ.ZERO ) THEN -* -* H(i) = I -* - DO J = I, K - T( J, I ) = ZERO - END DO - 
ELSE -* -* general case -* - IF( I.LT.K ) THEN - IF( LSAME( STOREV, 'C' ) ) THEN -* Skip any leading zeros. - DO LASTV = 1, I-1 - IF( V( LASTV, I ).NE.ZERO ) EXIT - END DO - DO J = I+1, K - T( J, I ) = -TAU( I ) * CONJG( V( N-K+I , J ) ) - END DO - J = MAX( LASTV, PREVLASTV ) -* -* T(i+1:k,i) = -tau(i) * V(j:n-k+i,i+1:k)**H * V(j:n-k+i,i) -* - CALL ZGEMV( 'Conjugate transpose', N-K+I-J, K-I, - $ -TAU( I ), V( J, I+1 ), LDV, V( J, I ), - $ 1, ONE, T( I+1, I ), 1 ) - ELSE -* Skip any leading zeros. - DO LASTV = 1, I-1 - IF( V( I, LASTV ).NE.ZERO ) EXIT - END DO - DO J = I+1, K - T( J, I ) = -TAU( I ) * V( J, N-K+I ) - END DO - J = MAX( LASTV, PREVLASTV ) -* -* T(i+1:k,i) = -tau(i) * V(i+1:k,j:n-k+i) * V(i,j:n-k+i)**H -* - CALL ZGEMM( 'N', 'C', K-I, 1, N-K+I-J, -TAU( I ), - $ V( I+1, J ), LDV, V( I, J ), LDV, - $ ONE, T( I+1, I ), LDT ) - END IF -* -* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) -* - CALL ZTRMV( 'Lower', 'No transpose', 'Non-unit', K-I, - $ T( I+1, I+1 ), LDT, T( I+1, I ), 1 ) - IF( I.GT.1 ) THEN - PREVLASTV = MIN( PREVLASTV, LASTV ) - ELSE - PREVLASTV = LASTV - END IF - END IF - T( I, I ) = TAU( I ) - END IF +* +* T_{1,2} = T_{1,2}*V_{2,2} +* + CALL ZTRMM('Right', 'Lower', 'No transpose', 'Unit', L, + $ K-L, ONE, V(L+1, L+1), LDV, T(1, L+1), LDT) + +* +* T_{1,2} = V_{3,1}'*V_{3,2} + T_{1,2} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL ZGEMM('Conjugate', 'No transpose', L, K-L, N-K, ONE, + $ V(K+1, 1), LDV, V(K+1, L+1), LDV, ONE, + $ T(1, L+1), LDT) +* +* At this point, we have that T_{1,2} = V_1'*V_2 +* All that is left is to pre and post multiply by -T_{1,1} and T_{2,2} +* respectively. 
+* +* T_{1,2} = -T_{1,1}*T_{1,2} +* + CALL ZTRMM('Left', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T, LDT, T(1, L+1), LDT) +* +* T_{1,2} = T_{1,2}*T_{2,2} +* + CALL ZTRMM('Right', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T(L+1, L+1), LDT, T(1, L+1), LDT) + + ELSE IF(LQ) THEN +* +* Break V apart into 6 components +* +* V = |----------------------| +* |V_{1,1} V_{1,2} V{1,3}| +* |0 V_{2,2} V{2,3}| +* |----------------------| +* +* V_{1,1}\in\C^{l,l} unit upper triangular +* V_{1,2}\in\C^{l,k-l} rectangular +* V_{1,3}\in\C^{l,n-k} rectangular +* +* V_{2,2}\in\C^{k-l,k-l} unit upper triangular +* V_{2,3}\in\C^{k-l,n-k} rectangular +* +* Where l = floor(k/2) +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} T_{1,2}| +* |0 T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\C^{l, l} upper triangular +* T_{2,2}\in\C^{k-l, k-l} upper triangular +* T_{1,2}\in\C^{l, k-l} rectangular +* +* Then, consider the product: +* +* (I - V_1'*T_{1,1}*V_1)*(I - V_2'*T_{2,2}*V_2) +* = I - V_1'*T_{1,1}*V_1 - V_2'*T_{2,2}*V_2 + V_1'*T_{1,1}*V_1*V_2'*T_{2,2}*V_2 +* +* Define T_{1,2} = -T_{1,1}*V_1*V_2'*T_{2,2} +* +* Then, we can define the matrix V as +* V = |---| +* |V_1| +* |V_2| +* |---| +* +* So, our product is equivalent to the matrix product +* I - V'*T*V +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{1,2} +* +* Compute T_{1,1} recursively +* + CALL ZLARFT(DIRECT, STOREV, N, L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL ZLARFT(DIRECT, STOREV, N-L, K-L, V(L+1, L+1), LDV, + $ TAU(L+1), T(L+1, L+1), LDT) + +* +* Compute T_{1,2} +* T_{1,2} = V_{1,2} +* + CALL ZLACPY('All', L, K-L, V(1, L+1), LDV, T(1, L+1), LDT) +* +* T_{1,2} = T_{1,2}*V_{2,2}' +* + CALL ZTRMM('Right', 
'Upper', 'Conjugate', 'Unit', L, K-L, + $ ONE, V(L+1, L+1), LDV, T(1, L+1), LDT) + +* +* T_{1,2} = V_{1,3}*V_{2,3}' + T_{1,2} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL ZGEMM('No transpose', 'Conjugate', L, K-L, N-K, ONE, + $ V(1, K+1), LDV, V(L+1, K+1), LDV, ONE, + $ T(1, L+1), LDT) +* +* At this point, we have that T_{1,2} = V_1*V_2' +* All that is left is to pre and post multiply by -T_{1,1} and T_{2,2} +* respectively. +* +* T_{1,2} = -T_{1,1}*T_{1,2} +* + CALL ZTRMM('Left', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T, LDT, T(1, L+1), LDT) + +* +* T_{1,2} = T_{1,2}*T_{2,2} +* + CALL ZTRMM('Right', 'Upper', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T(L+1, L+1), LDT, T(1, L+1), LDT) + ELSE IF(QL) THEN +* +* Break V apart into 6 components +* +* V = |---------------| +* |V_{1,1} V_{1,2}| +* |V_{2,1} V_{2,2}| +* |0 V_{3,2}| +* |---------------| +* +* V_{1,1}\in\C^{n-k,k-l} rectangular +* V_{2,1}\in\C^{k-l,k-l} unit upper triangular +* +* V_{1,2}\in\C^{n-k,l} rectangular +* V_{2,2}\in\C^{k-l,l} rectangular +* V_{3,2}\in\C^{l,l} unit upper triangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} 0 | +* |T_{2,1} T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. 
Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\C^{k-l, k-l} non-unit lower triangular +* T_{2,2}\in\C^{l, l} non-unit lower triangular +* T_{2,1}\in\C^{k-l, l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_2*T_{2,2}*V_2')*(I - V_1*T_{1,1}*V_1') +* = I - V_2*T_{2,2}*V_2' - V_1*T_{1,1}*V_1' + V_2*T_{2,2}*V_2'*V_1*T_{1,1}*V_1' +* +* Define T_{2,1} = -T_{2,2}*V_2'*V_1*T_{1,1} +* +* Then, we can define the matrix V as +* V = |-------| +* |V_1 V_2| +* |-------| +* +* So, our product is equivalent to the matrix product +* I - V*T*V' +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{2,1} +* +* Compute T_{1,1} recursively +* + CALL ZLARFT(DIRECT, STOREV, N-L, K-L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL ZLARFT(DIRECT, STOREV, N, L, V(1, K-L+1), LDV, + $ TAU(K-L+1), T(K-L+1, K-L+1), LDT) +* +* Compute T_{2,1} +* T_{2,1} = V_{2,2}' +* + DO J = 1, K-L + DO I = 1, L + T(K-L+I, J) = CONJG(V(N-K+J, K-L+I)) + END DO END DO - END IF - RETURN * -* End of ZLARFT +* T_{2,1} = T_{2,1}*V_{2,1} +* + CALL ZTRMM('Right', 'Upper', 'No transpose', 'Unit', L, + $ K-L, ONE, V(N-K+1, 1), LDV, T(K-L+1, 1), LDT) + +* +* T_{2,1} = V_{2,2}'*V_{2,1} + T_{2,1} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL ZGEMM('Conjugate', 'No transpose', L, K-L, N-K, ONE, + $ V(1, K-L+1), LDV, V, LDV, ONE, T(K-L+1, 1), + $ LDT) +* +* At this point, we have that T_{2,1} = V_2'*V_1 +* All that is left is to pre and post multiply by -T_{2,2} and T_{1,1} +* respectively. 
+* +* T_{2,1} = -T_{2,2}*T_{2,1} +* + CALL ZTRMM('Left', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T(K-L+1, K-L+1), LDT, + $ T(K-L+1, 1), LDT) * - END +* T_{2,1} = T_{2,1}*T_{1,1} +* + CALL ZTRMM('Right', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T, LDT, T(K-L+1, 1), LDT) + ELSE +* +* Else means RQ case +* +* Break V apart into 6 components +* +* V = |-----------------------| +* |V_{1,1} V_{1,2} 0 | +* |V_{2,1} V_{2,2} V_{2,3}| +* |-----------------------| +* +* V_{1,1}\in\C^{k-l,n-k} rectangular +* V_{1,2}\in\C^{k-l,k-l} unit lower triangular +* +* V_{2,1}\in\C^{l,n-k} rectangular +* V_{2,2}\in\C^{l,k-l} rectangular +* V_{2,3}\in\C^{l,l} unit lower triangular +* +* We will construct the T matrix +* T = |---------------| +* |T_{1,1} 0 | +* |T_{2,1} T_{2,2}| +* |---------------| +* +* T is the triangular factor obtained from block reflectors. +* To motivate the structure, assume we have already computed T_{1,1} +* and T_{2,2}. Then collect the associated reflectors in V_1 and V_2 +* +* T_{1,1}\in\C^{k-l, k-l} non-unit lower triangular +* T_{2,2}\in\C^{l, l} non-unit lower triangular +* T_{2,1}\in\C^{k-l, l} rectangular +* +* Where l = floor(k/2) +* +* Then, consider the product: +* +* (I - V_2'*T_{2,2}*V_2)*(I - V_1'*T_{1,1}*V_1) +* = I - V_2'*T_{2,2}*V_2 - V_1'*T_{1,1}*V_1 + V_2'*T_{2,2}*V_2*V_1'*T_{1,1}*V_1 +* +* Define T_{2,1} = -T_{2,2}*V_2*V_1'*T_{1,1} +* +* Then, we can define the matrix V as +* V = |---| +* |V_1| +* |V_2| +* |---| +* +* So, our product is equivalent to the matrix product +* I - V'*T*V +* This means, we can compute T_{1,1} and T_{2,2}, then use this information +* to compute T_{2,1} +* +* Compute T_{1,1} recursively +* + CALL ZLARFT(DIRECT, STOREV, N-L, K-L, V, LDV, TAU, T, LDT) +* +* Compute T_{2,2} recursively +* + CALL ZLARFT(DIRECT, STOREV, N, L, V(K-L+1, 1), LDV, + $ TAU(K-L+1), T(K-L+1, K-L+1), LDT) +* +* Compute T_{2,1} +* T_{2,1} = V_{2,2} +* + CALL ZLACPY('All', L, K-L, V(K-L+1, N-K+1), LDV, + $ T(K-L+1, 
1), LDT) + +* +* T_{2,1} = T_{2,1}*V_{1,2}' +* + CALL ZTRMM('Right', 'Lower', 'Conjugate', 'Unit', L, K-L, + $ ONE, V(1, N-K+1), LDV, T(K-L+1, 1), LDT) + +* +* T_{2,1} = V_{2,1}*V_{1,1}' + T_{2,1} +* Note: We assume K <= N, and GEMM will do nothing if N=K +* + CALL ZGEMM('No transpose', 'Conjugate', L, K-L, N-K, ONE, + $ V(K-L+1, 1), LDV, V, LDV, ONE, T(K-L+1, 1), + $ LDT) + +* +* At this point, we have that T_{2,1} = V_2*V_1' +* All that is left is to pre and post multiply by -T_{2,2} and T_{1,1} +* respectively. +* +* T_{2,1} = -T_{2,2}*T_{2,1} +* + CALL ZTRMM('Left', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, NEG_ONE, T(K-L+1, K-L+1), LDT, + $ T(K-L+1, 1), LDT) + +* +* T_{2,1} = T_{2,1}*T_{1,1} +* + CALL ZTRMM('Right', 'Lower', 'No transpose', 'Non-unit', L, + $ K-L, ONE, T, LDT, T(K-L+1, 1), LDT) + END IF + END SUBROUTINE diff --git a/lapack-netlib/TESTING/EIG/cchkhb2stg.f b/lapack-netlib/TESTING/EIG/cchkhb2stg.f index 1a11ac5eaf..7500c22791 100644 --- a/lapack-netlib/TESTING/EIG/cchkhb2stg.f +++ b/lapack-netlib/TESTING/EIG/cchkhb2stg.f @@ -852,8 +852,9 @@ SUBROUTINE CCHKHB2STG( NSIZES, NN, NWDTHS, KK, NTYPES, DOTYPE, CALL SLASUM( 'CHB', NOUNIT, NERRS, NTESTT ) RETURN * - 9999 FORMAT( ' CCHKHB2STG: ', A, ' returned INFO=', I6, '.', / 9X, 'N=', - $ I6, ', JTYPE=', I6, ', ISEED=(', 3( I5, ',' ), I5, ')' ) + 9999 FORMAT( ' CCHKHB2STG: ', A, ' returned INFO=', I6, '.', / 9X, + $ 'N=', I6, ', JTYPE=', I6, ', ISEED=(', 3( I5, ',' ), I5, + $ ')' ) 9998 FORMAT( / 1X, A3, $ ' -- Complex Hermitian Banded Tridiagonal Reduction Routines' $ ) diff --git a/lapack-netlib/TESTING/EIG/dchksb2stg.f b/lapack-netlib/TESTING/EIG/dchksb2stg.f index 878da8b6f0..4e807f1c88 100644 --- a/lapack-netlib/TESTING/EIG/dchksb2stg.f +++ b/lapack-netlib/TESTING/EIG/dchksb2stg.f @@ -840,8 +840,9 @@ SUBROUTINE DCHKSB2STG( NSIZES, NN, NWDTHS, KK, NTYPES, DOTYPE, CALL DLASUM( 'DSB', NOUNIT, NERRS, NTESTT ) RETURN * - 9999 FORMAT( ' DCHKSB2STG: ', A, ' returned INFO=', I6, '.', / 9X, 'N=', - $ 
I6, ', JTYPE=', I6, ', ISEED=(', 3( I5, ',' ), I5, ')' ) + 9999 FORMAT( ' DCHKSB2STG: ', A, ' returned INFO=', I6, '.', / 9X, + $ 'N=', I6, ', JTYPE=', I6, ', ISEED=(', 3( I5, ',' ), I5, + $ ')' ) * 9998 FORMAT( / 1X, A3, $ ' -- Real Symmetric Banded Tridiagonal Reduction Routines' ) diff --git a/lapack-netlib/TESTING/EIG/schksb2stg.f b/lapack-netlib/TESTING/EIG/schksb2stg.f index 5de9204979..eee486ade7 100644 --- a/lapack-netlib/TESTING/EIG/schksb2stg.f +++ b/lapack-netlib/TESTING/EIG/schksb2stg.f @@ -840,8 +840,9 @@ SUBROUTINE SCHKSB2STG( NSIZES, NN, NWDTHS, KK, NTYPES, DOTYPE, CALL SLASUM( 'SSB', NOUNIT, NERRS, NTESTT ) RETURN * - 9999 FORMAT( ' SCHKSB2STG: ', A, ' returned INFO=', I6, '.', / 9X, 'N=', - $ I6, ', JTYPE=', I6, ', ISEED=(', 3( I5, ',' ), I5, ')' ) + 9999 FORMAT( ' SCHKSB2STG: ', A, ' returned INFO=', I6, '.', / 9X, + $ 'N=', I6, ', JTYPE=', I6, ', ISEED=(', 3( I5, ',' ), I5, + $ ')' ) * 9998 FORMAT( / 1X, A3, $ ' -- Real Symmetric Banded Tridiagonal Reduction Routines' ) diff --git a/lapack-netlib/TESTING/EIG/zchkhb2stg.f b/lapack-netlib/TESTING/EIG/zchkhb2stg.f index 786df7882c..bfe6ceadca 100644 --- a/lapack-netlib/TESTING/EIG/zchkhb2stg.f +++ b/lapack-netlib/TESTING/EIG/zchkhb2stg.f @@ -849,8 +849,9 @@ SUBROUTINE ZCHKHB2STG( NSIZES, NN, NWDTHS, KK, NTYPES, DOTYPE, CALL DLASUM( 'ZHB', NOUNIT, NERRS, NTESTT ) RETURN * - 9999 FORMAT( ' ZCHKHB2STG: ', A, ' returned INFO=', I6, '.', / 9X, 'N=', - $ I6, ', JTYPE=', I6, ', ISEED=(', 3( I5, ',' ), I5, ')' ) + 9999 FORMAT( ' ZCHKHB2STG: ', A, ' returned INFO=', I6, '.', / 9X, + $ 'N=', I6, ', JTYPE=', I6, ', ISEED=(', 3( I5, ',' ), I5, + $ ')' ) 9998 FORMAT( / 1X, A3, $ ' -- Complex Hermitian Banded Tridiagonal Reduction Routines' $ ) diff --git a/lapack-netlib/TESTING/LIN/alahd.f b/lapack-netlib/TESTING/LIN/alahd.f index 8f966c5841..c0334b5de9 100644 --- a/lapack-netlib/TESTING/LIN/alahd.f +++ b/lapack-netlib/TESTING/LIN/alahd.f @@ -954,7 +954,7 @@ SUBROUTINE ALAHD( IOUNIT, PATH ) $ 4X, '10. 
Random, Last columns are zero starting from', $ ' MINMN/2+1, CNDNUM = 2', / $ 4X, '11. Random, Half MINMN columns in the middle are', - $ ' zero starting from MINMN/2-(MINMN/2)/2+1,' + $ ' zero starting from MINMN/2-(MINMN/2)/2+1,', $ ' CNDNUM = 2', / $ 4X, '12. Random, Odd columns are ZERO, CNDNUM = 2', / $ 4X, '13. Random, Even columns are ZERO, CNDNUM = 2', / diff --git a/mkdocs.yml b/mkdocs.yml index 374b03e398..333344fe30 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -26,13 +26,18 @@ theme: plugins: - search + - mermaid2 - git-revision-date-localized: enable_creation_date: true markdown_extensions: - admonition - pymdownx.details - - pymdownx.superfences + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:mermaid2.fence_mermaid_custom - footnotes - pymdownx.tabbed: alternate_style: true @@ -46,6 +51,7 @@ nav: - extensions.md - developers.md - build_system.md + - runtime_variables.md - distributing.md - ci.md - about.md diff --git a/openblas.pc.in b/openblas.pc.in index d9bb845499..fe2f087208 100644 --- a/openblas.pc.in +++ b/openblas.pc.in @@ -2,6 +2,6 @@ Name: openblas Description: OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version Version: ${version} URL: https://github.com/xianyi/OpenBLAS -Libs: -L${libdir} -l${libprefix}openblas${libnamesuffix} +Libs: -L${libdir} -l${libprefix}openblas${libsuffix}${libnamesuffix} Libs.private: ${extralib} Cflags: -I${includedir} ${omp_opt} diff --git a/param.h b/param.h index 51ebcbabbe..13c5c3ad0c 100644 --- a/param.h +++ b/param.h @@ -4039,6 +4039,8 @@ Until then, just keep it different than DGEMM_DEFAULT_UNROLL_N to keep copy rout #define CGEMM_DEFAULT_UNROLL_N 2 #define ZGEMM_DEFAULT_UNROLL_N 2 #define XGEMM_DEFAULT_UNROLL_N 1 +#define CGEMM3M_DEFAULT_UNROLL_N 2 +#define ZGEMM3M_DEFAULT_UNROLL_N 2 #ifdef ARCH_X86 #define SGEMM_DEFAULT_UNROLL_M 2 @@ -4054,6 +4056,18 @@ Until then, just keep it different than DGEMM_DEFAULT_UNROLL_N to keep copy rout 
#define CGEMM_DEFAULT_UNROLL_M 2 #define ZGEMM_DEFAULT_UNROLL_M 2 #define XGEMM_DEFAULT_UNROLL_M 1 +#define CGEMM3M_DEFAULT_UNROLL_M 2 +#define ZGEMM3M_DEFAULT_UNROLL_M 2 +#define CGEMM3M_DEFAULT_P 448 +#define ZGEMM3M_DEFAULT_P 224 +#define XGEMM3M_DEFAULT_P 112 +#define CGEMM3M_DEFAULT_Q 224 +#define ZGEMM3M_DEFAULT_Q 224 +#define XGEMM3M_DEFAULT_Q 224 +#define CGEMM3M_DEFAULT_R 12288 +#define ZGEMM3M_DEFAULT_R 12288 +#define XGEMM3M_DEFAULT_R 12288 + #endif #ifdef ARCH_MIPS diff --git a/utest/openblas_utest.h b/utest/openblas_utest.h index abe381a924..1851c60c56 100644 --- a/utest/openblas_utest.h +++ b/utest/openblas_utest.h @@ -36,7 +36,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include "ctest.h" - +#if defined(_MSC_VER) && defined(__INTEL_CLANG_COMPILER) +//#define LAPACK_COMPLEX_STRUCTURE +#define NOCHANGE +#endif #include #include diff --git a/utest/test_extensions/common.c b/utest/test_extensions/common.c index 808aa54557..a5d3196aaf 100644 --- a/utest/test_extensions/common.c +++ b/utest/test_extensions/common.c @@ -206,7 +206,7 @@ void ztranspose(blasint rows, blasint cols, double *alpha, double *a_src, int ld * param lda_dst - leading dimension of output matrix A * param conj specifies conjugation */ -void scopy(blasint rows, blasint cols, float alpha, float *a_src, int lda_src, +void my_scopy(blasint rows, blasint cols, float alpha, float *a_src, int lda_src, float *a_dst, blasint lda_dst) { blasint i, j; @@ -217,7 +217,7 @@ void scopy(blasint rows, blasint cols, float alpha, float *a_src, int lda_src, } } -void dcopy(blasint rows, blasint cols, double alpha, double *a_src, int lda_src, +void my_dcopy(blasint rows, blasint cols, double alpha, double *a_src, int lda_src, double *a_dst, blasint lda_dst) { blasint i, j; @@ -228,7 +228,7 @@ void dcopy(blasint rows, blasint cols, double alpha, double *a_src, int lda_src, } } -void ccopy(blasint rows, blasint cols, float *alpha, float *a_src, int lda_src, +void 
my_ccopy(blasint rows, blasint cols, float *alpha, float *a_src, int lda_src, float *a_dst, blasint lda_dst, int conj) { blasint i, j; @@ -243,7 +243,7 @@ void ccopy(blasint rows, blasint cols, float *alpha, float *a_src, int lda_src, } } -void zcopy(blasint rows, blasint cols, double *alpha, double *a_src, int lda_src, +void my_zcopy(blasint rows, blasint cols, double *alpha, double *a_src, int lda_src, double *a_dst, blasint lda_dst, int conj) { blasint i, j; diff --git a/utest/test_extensions/common.h b/utest/test_extensions/common.h index 62b84325c2..f8c60d2686 100644 --- a/utest/test_extensions/common.h +++ b/utest/test_extensions/common.h @@ -65,12 +65,12 @@ extern void ctranspose(blasint rows, blasint cols, float *alpha, float *a_src, i extern void ztranspose(blasint rows, blasint cols, double *alpha, double *a_src, int lda_src, double *a_dst, blasint lda_dst, int conj); -extern void scopy(blasint rows, blasint cols, float alpha, float *a_src, int lda_src, +extern void my_scopy(blasint rows, blasint cols, float alpha, float *a_src, int lda_src, float *a_dst, blasint lda_dst); -extern void dcopy(blasint rows, blasint cols, double alpha, double *a_src, int lda_src, +extern void my_dcopy(blasint rows, blasint cols, double alpha, double *a_src, int lda_src, double *a_dst, blasint lda_dst); -extern void ccopy(blasint rows, blasint cols, float *alpha, float *a_src, int lda_src, +extern void my_ccopy(blasint rows, blasint cols, float *alpha, float *a_src, int lda_src, float *a_dst, blasint lda_dst, int conj); -extern void zcopy(blasint rows, blasint cols, double *alpha, double *a_src, int lda_src, +extern void my_zcopy(blasint rows, blasint cols, double *alpha, double *a_src, int lda_src, double *a_dst, blasint lda_dst, int conj); -#endif \ No newline at end of file +#endif diff --git a/utest/test_extensions/test_cimatcopy.c b/utest/test_extensions/test_cimatcopy.c index 0c96a3b17c..41c0a0f6b0 100644 --- a/utest/test_extensions/test_cimatcopy.c +++ 
b/utest/test_extensions/test_cimatcopy.c @@ -91,7 +91,7 @@ static float check_cimatcopy(char api, char order, char trans, blasint rows, bla ctranspose(m, n, alpha, data_cimatcopy.a_test, lda_src, data_cimatcopy.a_verify, lda_dst, conj); } else { - ccopy(m, n, alpha, data_cimatcopy.a_test, lda_src, data_cimatcopy.a_verify, lda_dst, conj); + my_ccopy(m, n, alpha, data_cimatcopy.a_test, lda_src, data_cimatcopy.a_verify, lda_dst, conj); } if (api == 'F') { diff --git a/utest/test_extensions/test_comatcopy.c b/utest/test_extensions/test_comatcopy.c index b493c93a6f..dc6beeeaee 100644 --- a/utest/test_extensions/test_comatcopy.c +++ b/utest/test_extensions/test_comatcopy.c @@ -92,7 +92,7 @@ static float check_comatcopy(char api, char order, char trans, blasint rows, bla ctranspose(m, n, alpha, data_comatcopy.a_test, lda, data_comatcopy.b_verify, ldb, conj); } else { - ccopy(m, n, alpha, data_comatcopy.a_test, lda, data_comatcopy.b_verify, ldb, conj); + my_ccopy(m, n, alpha, data_comatcopy.a_test, lda, data_comatcopy.b_verify, ldb, conj); } if (api == 'F') { diff --git a/utest/test_extensions/test_dimatcopy.c b/utest/test_extensions/test_dimatcopy.c index eebb7669eb..f57707eeea 100644 --- a/utest/test_extensions/test_dimatcopy.c +++ b/utest/test_extensions/test_dimatcopy.c @@ -86,7 +86,7 @@ static double check_dimatcopy(char api, char order, char trans, blasint rows, bl dtranspose(m, n, alpha, data_dimatcopy.a_test, lda_src, data_dimatcopy.a_verify, lda_dst); } else { - dcopy(m, n, alpha, data_dimatcopy.a_test, lda_src, data_dimatcopy.a_verify, lda_dst); + my_dcopy(m, n, alpha, data_dimatcopy.a_test, lda_src, data_dimatcopy.a_verify, lda_dst); } if (api == 'F') { diff --git a/utest/test_extensions/test_domatcopy.c b/utest/test_extensions/test_domatcopy.c index e892271d2d..8869f7b453 100644 --- a/utest/test_extensions/test_domatcopy.c +++ b/utest/test_extensions/test_domatcopy.c @@ -87,7 +87,7 @@ static double check_domatcopy(char api, char order, char trans, blasint rows, 
bl dtranspose(m, n, alpha, data_domatcopy.a_test, lda, data_domatcopy.b_verify, ldb); } else { - dcopy(m, n, alpha, data_domatcopy.a_test, lda, data_domatcopy.b_verify, ldb); + my_dcopy(m, n, alpha, data_domatcopy.a_test, lda, data_domatcopy.b_verify, ldb); } if (api == 'F') { diff --git a/utest/test_extensions/test_simatcopy.c b/utest/test_extensions/test_simatcopy.c index c00ea0c8f0..6b70881bf9 100644 --- a/utest/test_extensions/test_simatcopy.c +++ b/utest/test_extensions/test_simatcopy.c @@ -86,7 +86,7 @@ static float check_simatcopy(char api, char order, char trans, blasint rows, bla stranspose(m, n, alpha, data_simatcopy.a_test, lda_src, data_simatcopy.a_verify, lda_dst); } else { - scopy(m, n, alpha, data_simatcopy.a_test, lda_src, data_simatcopy.a_verify, lda_dst); + my_scopy(m, n, alpha, data_simatcopy.a_test, lda_src, data_simatcopy.a_verify, lda_dst); } if (api == 'F') { diff --git a/utest/test_extensions/test_somatcopy.c b/utest/test_extensions/test_somatcopy.c index 62a6056d92..bcc2eabf51 100644 --- a/utest/test_extensions/test_somatcopy.c +++ b/utest/test_extensions/test_somatcopy.c @@ -87,7 +87,7 @@ static float check_somatcopy(char api, char order, char trans, blasint rows, bla stranspose(m, n, alpha, data_somatcopy.a_test, lda, data_somatcopy.b_verify, ldb); } else { - scopy(m, n, alpha, data_somatcopy.a_test, lda, data_somatcopy.b_verify, ldb); + my_scopy(m, n, alpha, data_somatcopy.a_test, lda, data_somatcopy.b_verify, ldb); } if (api == 'F') { diff --git a/utest/test_extensions/test_zimatcopy.c b/utest/test_extensions/test_zimatcopy.c index 86bc4670f2..349050b9c1 100644 --- a/utest/test_extensions/test_zimatcopy.c +++ b/utest/test_extensions/test_zimatcopy.c @@ -91,7 +91,7 @@ static double check_zimatcopy(char api, char order, char trans, blasint rows, bl ztranspose(m, n, alpha, data_zimatcopy.a_test, lda_src, data_zimatcopy.a_verify, lda_dst, conj); } else { - zcopy(m, n, alpha, data_zimatcopy.a_test, lda_src, data_zimatcopy.a_verify, lda_dst, 
conj); + my_zcopy(m, n, alpha, data_zimatcopy.a_test, lda_src, data_zimatcopy.a_verify, lda_dst, conj); } if (api == 'F') { diff --git a/utest/test_extensions/test_zomatcopy.c b/utest/test_extensions/test_zomatcopy.c index 208cfd981c..eb13d10830 100644 --- a/utest/test_extensions/test_zomatcopy.c +++ b/utest/test_extensions/test_zomatcopy.c @@ -92,7 +92,7 @@ static double check_zomatcopy(char api, char order, char trans, blasint rows, bl ztranspose(m, n, alpha, data_zomatcopy.a_test, lda, data_zomatcopy.b_verify, ldb, conj); } else { - zcopy(m, n, alpha, data_zomatcopy.a_test, lda, data_zomatcopy.b_verify, ldb, conj); + my_zcopy(m, n, alpha, data_zomatcopy.a_test, lda, data_zomatcopy.b_verify, ldb, conj); } if (api == 'F') { diff --git a/utest/test_rot.c b/utest/test_rot.c index 0e74ecbb36..e4ba44a034 100644 --- a/utest/test_rot.c +++ b/utest/test_rot.c @@ -53,6 +53,41 @@ CTEST(rot,drot_inc_0) ASSERT_DBL_NEAR_TOL(y2[i], y1[i], DOUBLE_EPS); } } +CTEST(rot,drot_inc_1) +{ + blasint i=0; + blasint N=4,incX=1,incY=1; + double c=1.0,s=1.0; + double x1[]={1.0,3.0,5.0,7.0}; + double y1[]={2.0,4.0,6.0,8.0}; + double x2[]={3.0,7.0,11.0,15.0}; + double y2[]={1.0,1.0,1.0,1.0}; + + BLASFUNC(drot)(&N,x1,&incX,y1,&incY,&c,&s); + + for(i=0; i