Add support for HIP/rocm. #10

Open · wants to merge 1 commit into base: main
6 changes: 3 additions & 3 deletions include/Random123/array.h
@@ -81,7 +81,7 @@ inline R123_CUDA_DEVICE value_type assemble_from_u32(uint32_t *p32){

/** @endcond */

-#ifdef __CUDA_ARCH__
+#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
/* CUDA can't handle std::reverse_iterator. We *could* implement it
ourselves, but let's not bother until somebody really feels a need
to reverse-iterate through an r123array */
@@ -114,8 +114,8 @@ inline R123_CUDA_DEVICE value_type assemble_from_u32(uint32_t *p32){
enum {static_size = _N}; \
R123_CUDA_DEVICE reference operator[](size_type i){return v[i];} \
R123_CUDA_DEVICE const_reference operator[](size_type i) const {return v[i];} \
-R123_CUDA_DEVICE reference at(size_type i){ if(i >= _N) R123_THROW(std::out_of_range("array index out of range")); return (*this)[i]; } \
-R123_CUDA_DEVICE const_reference at(size_type i) const { if(i >= _N) R123_THROW(std::out_of_range("array index out of range")); return (*this)[i]; } \
+R123_CUDA_DEVICE reference at(size_type i){ if(i >= _N) {R123_THROW(std::out_of_range("array index out of range"));}; return (*this)[i]; } \
+R123_CUDA_DEVICE const_reference at(size_type i) const { if(i >= _N) {R123_THROW(std::out_of_range("array index out of range"));}; return (*this)[i]; } \
R123_CUDA_DEVICE size_type size() const { return _N; } \
R123_CUDA_DEVICE size_type max_size() const { return _N; } \
R123_CUDA_DEVICE bool empty() const { return _N==0; }; \
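Aside on the at() change: the new braces are not just style. When R123_THROW expands to a braceless if/else, as the device-side R123_ASSERT in hipfeatures.h below does, the unbraced form nests one if inside another and clang-based hipcc warns under -Wdangling-else. A minimal standalone sketch (MY_ASSERT is a hypothetical stand-in, not a Random123 name):

#include <cstdlib>

// Hypothetical stand-in for the device-side R123_ASSERT in hipfeatures.h,
// which likewise expands to a braceless if/else.
#define MY_ASSERT(x) if((x)); else std::abort()

int get_unbraced(int i, int n){
    if(i >= n) MY_ASSERT(0);  // expands to: if(i >= n) if((0)); else std::abort();
                              // the else binds to the inner if; clang-based
                              // hipcc warns with -Wdangling-else
    return i;
}

int get_braced(int i, int n){
    if(i >= n) { MY_ASSERT(0); }  // braces make the binding explicit; no warning
    return i;
}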
2 changes: 1 addition & 1 deletion include/Random123/boxmuller.hpp
@@ -68,7 +68,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

namespace r123{

-#if !defined(__CUDACC__)
+#if !(defined(__CUDACC__) || defined(__HIPCC__))
typedef struct { float x, y; } float2;
typedef struct { double x, y; } double2;
#else
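With hipcc, the typedefs above are now skipped and float2/double2 come from the HIP runtime's own vector types. A short sketch of how boxmuller.hpp consumes them; the two-argument float overload of boxmuller matches the header's documented API, but treat the exact signature as an assumption:

#include <cstdint>
#include <Random123/philox.h>
#include <Random123/boxmuller.hpp>

// Two 32-bit Philox outputs become one pair of normally distributed
// floats; under hipcc, float2 here is the HIP runtime's vector type.
float2 normal_pair(uint32_t seed, uint32_t ctr){
    r123::Philox4x32 rng;
    r123::Philox4x32::ctr_type c = {{ctr, 0, 0, 0}};
    r123::Philox4x32::key_type k = {{seed}};
    r123::Philox4x32::ctr_type r = rng(c, k);
    return r123::boxmuller(r[0], r[1]);
}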
2 changes: 2 additions & 0 deletions include/Random123/features/compilerfeatures.h
@@ -204,6 +204,8 @@ added to each of the *features.h files, AND to examples/ut_features.cpp.
#include "openclfeatures.h"
#elif defined(__CUDACC__)
#include "nvccfeatures.h"
+#elif defined(__HIPCC__)
+#include "hipfeatures.h"
#elif defined(__ICC)
#include "iccfeatures.h"
#elif defined(__xlC__) || defined(__ibmxl__)
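Note the placement of the new branch: hipcc is clang-based and also defines __GNUC__, so __HIPCC__ must be tested before the chain falls through to the generic gcc/clang branch at the bottom of the file. A compile-time probe illustrating the point (the pragma messages are purely illustrative):

#if defined(__HIPCC__)
#  pragma message("hipcc detected: hipfeatures.h would be selected")
#elif defined(__GNUC__)
#  pragma message("generic gcc/clang: gccfeatures.h would be selected")
#endif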
129 changes: 129 additions & 0 deletions include/Random123/features/hipfeatures.h
@@ -0,0 +1,129 @@
/*
Copyright 2010-2011, D. E. Shaw Research.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions, and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions, and the following disclaimer in the
documentation and/or other materials provided with the distribution.

* Neither the name of D. E. Shaw Research nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __r123_hip_features_dot_h__
#define __r123_hip_features_dot_h__

#if !(defined(CUDART_VERSION) || defined(HIP_INCLUDE_HIP_HIP_RUNTIME_API_H))
#error "why are we in hipfeatures.h if neither CUDART_VERSION NOR HIP_PLATFORM?"
#endif

#if CUDART_VERSION < 4010 && !defined(HIP_INCLUDE_HIP_HIP_RUNTIME_API_H)
#error "CUDA versions earlier than 4.1 produce incorrect results for some templated functions in namespaces. Random123 is unsupported. See comments in nvccfeatures.h"
// This test was added in Random123-1.08 (August, 2013) because we
// discovered that Ftype(maxTvalue<T>()) with Ftype=double and
// T=uint64_t in examples/uniform.hpp produces -1 for CUDA4.0 and
// earlier. We can't be sure this bug doesn't also affect invocations
// of other templated functions, e.g., essentially all of Random123.
// Thus, we no longer trust CUDA versions earlier than 4.1 even though
// we had previously tested and timed Random123 with CUDA 3.x and 4.0.
// If you feel lucky or desperate, you can change #error to #warning, but
// please take extra care to be sure that you are getting correct
// results.
#endif

// nvcc falls through to gcc or msvc. So first define
// a couple of things and then include either gccfeatures.h
// or msvcfeatures.h

// Testing __CUDA_ARCH__ / __HIP_DEVICE_COMPILE__ lets Philox32 and Philox64
// be compiled for both device and host functions: the compiler defines
// these macros only during the device pass.
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
#ifndef R123_CUDA_DEVICE
#define R123_CUDA_DEVICE __host__ __device__
#endif

#ifndef R123_USE_MULHILO64_CUDA_INTRIN
#define R123_USE_MULHILO64_CUDA_INTRIN 1
#endif

#ifndef R123_THROW
// No exceptions in CUDA, at least up to 4.0; nor in HIP device code
#define R123_THROW(x) R123_ASSERT(0)
#endif

#ifndef R123_ASSERT
# if defined(__CUDA_ARCH__)
# define R123_ASSERT(x) if((x)); else asm("trap;")
# elif defined(__HIP_DEVICE_COMPILE__)
# define R123_ASSERT(x) if((x)); else asm("s_trap 2;")
# endif
#endif

#else // ! ( defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) )
// If we're using nvcc or hipcc but not compiling for the device,
// then we must be compiling for the host. In that case,
// tell the philox code to use the mulhilo64 asm because
// nvcc doesn't grok uint128_t.
#ifndef R123_USE_MULHILO64_ASM
#define R123_USE_MULHILO64_ASM 1
#endif

#endif // __CUDA_ARCH__ || __HIP_DEVICE_COMPILE__

#ifndef R123_BUILTIN_EXPECT
#define R123_BUILTIN_EXPECT(expr,likely) expr
#endif

#ifndef R123_USE_AES_NI
#define R123_USE_AES_NI 0
#endif

#ifndef R123_USE_SSE4_2
#define R123_USE_SSE4_2 0
#endif

#ifndef R123_USE_SSE4_1
#define R123_USE_SSE4_1 0
#endif

#ifndef R123_USE_SSE
#define R123_USE_SSE 0
#endif

#ifndef R123_USE_GNU_UINT128
#define R123_USE_GNU_UINT128 0
#endif

#ifndef R123_ULONG_LONG
// uint64_t, which is what we'd get without this, is
// not the same as unsigned long long
#define R123_ULONG_LONG unsigned long long
#endif

#if defined(__GNUC__)
#include "gccfeatures.h"
#elif defined(_MSC_FULL_VER)
#include "msvcfeatures.h"
#endif

#endif
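With this header in place, a HIP translation unit can use Random123 end to end. A minimal sketch, not part of the PR; the grid geometry, seed, and the crude [0,1) scaling are arbitrary illustrative choices:

#include <cstdint>
#include <hip/hip_runtime.h>
#include <Random123/philox.h>

// Counter-based generation: each thread derives its own stream from
// its thread id, so no RNG state is stored or carried between calls.
__global__ void fill_uniform(float *out, unsigned n, uint32_t seed){
    unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid >= n) return;
    r123::Philox4x32 rng;
    r123::Philox4x32::ctr_type c = {{tid, 0, 0, 0}};  // counter = thread id
    r123::Philox4x32::key_type k = {{seed}};
    r123::Philox4x32::ctr_type r = rng(c, k);
    out[tid] = r[0] * (1.0f/4294967296.0f);           // crude map to [0,1)
}

int main(){
    const unsigned N = 1 << 20;
    float *d = nullptr;
    hipMalloc(reinterpret_cast<void**>(&d), N * sizeof(float));
    fill_uniform<<<(N + 255)/256, 256>>>(d, N, 12345u);
    hipDeviceSynchronize();
    hipFree(d);
    return 0;
}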
2 changes: 1 addition & 1 deletion include/Random123/uniform.hpp
@@ -125,7 +125,7 @@ R123_MK_SIGNED_UNSIGNED(__int128_t, __uint128_t);
#undef R123_MK_SIGNED_UNSIGNED
#endif

-#if defined(__CUDACC__) || defined(_LIBCPP_HAS_NO_CONSTEXPR)
+#if defined(__CUDACC__) || defined(_LIBCPP_HAS_NO_CONSTEXPR) || defined(__HIPCC__)
// Amazing! cuda thinks numeric_limits::max() is a __host__ function, so
// we can't use it in a device function.
//
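The practical effect of the uniform.hpp change is that r123::u01 remains usable in HIP device code, taking the non-constexpr maxTvalue path under hipcc. A sketch, assuming the u01<Ftype>(integer) entry point that header provides:

#include <cstdint>
#include <hip/hip_runtime.h>
#include <Random123/philox.h>
#include <Random123/uniform.hpp>

// Maps one raw 32-bit Philox output to a uniform float on the unit
// interval, entirely on the device; this is the code path the added
// __HIPCC__ test keeps free of constexpr numeric_limits use.
__device__ float device_uniform(uint32_t raw){
    return r123::u01<float>(raw);
}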