Merge pull request #2030 from JuliaGPU/tb/fastmath
Add support for @cuda fastmath
maleadt authored Aug 17, 2023
2 parents ac74718 + a2d3219 commit fade845
Showing 4 changed files with 31 additions and 4 deletions.
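
The new keyword can be passed directly at the `@cuda` call site. A minimal usage sketch (the kernel and array below are hypothetical illustrations, not part of this diff):

    using CUDA

    function scale_kernel(x)
        i = threadIdx().x
        @inbounds x[i] = 1 / sqrt(x[i])
        return
    end

    x = CUDA.rand(Float32, 32)
    # compile this kernel with fast math enabled: approximate square roots
    # and flushing of denormals to zero
    @cuda threads=32 fastmath=true scale_kernel(x)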
Manifest.toml (4 changes: 2 additions & 2 deletions)
@@ -142,9 +142,9 @@ version = "0.1.5"

 [[GPUCompiler]]
 deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "Scratch", "TimerOutputs", "UUIDs"]
-git-tree-sha1 = "72b2e3c2ba583d1a7aa35129e56cf92e07c083e3"
+git-tree-sha1 = "8de395b1243771bbb79ac832ec96c7def7a4586f"
 uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
-version = "0.21.4"
+version = "0.22.0"

 [[InlineStrings]]
 deps = ["Parsers"]
Project.toml (2 changes: 1 addition & 1 deletion)
@@ -46,7 +46,7 @@ Crayons = "4"
 DataFrames = "1"
 ExprTools = "0.1"
 GPUArrays = "8.6"
-GPUCompiler = "0.21"
+GPUCompiler = "0.22"
 KernelAbstractions = "0.9.2"
 LLVM = "6"
 Preferences = "1"
src/compiler/execution.jl (3 changes: 2 additions & 1 deletion)
@@ -6,7 +6,7 @@ export @cuda, cudaconvert, cufunction, dynamic_cufunction, nextwarp, prevwarp
 ## high-level @cuda interface

 const MACRO_KWARGS = [:dynamic, :launch]
-const COMPILER_KWARGS = [:kernel, :name, :always_inline, :minthreads, :maxthreads, :blocks_per_sm, :maxregs]
+const COMPILER_KWARGS = [:kernel, :name, :always_inline, :minthreads, :maxthreads, :blocks_per_sm, :maxregs, :fastmath]
 const LAUNCH_KWARGS = [:cooperative, :blocks, :threads, :shmem, :stream]


@@ -306,6 +306,7 @@ The following keyword arguments are supported:
   supported on LLVM 4.0+)
 - `name`: override the name that the kernel will have in the generated code
 - `always_inline`: inline all function calls in the kernel
+- `fastmath`: use less precise square roots and flush denormals

 The output of this function is automatically cached, i.e. you can simply call `cufunction`
 in a hot path without degrading performance. New code will be generated automatically, when
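Since `fastmath` is forwarded to `cufunction` like the other compiler keywords, it also works with the compile-first, launch-later pattern. A sketch reusing the same hypothetical kernel as above:

    using CUDA

    function scale_kernel(x)
        i = threadIdx().x
        @inbounds x[i] = 1 / sqrt(x[i])
        return
    end

    x = CUDA.rand(Float32, 32)
    # compile without launching; fastmath applies at compilation time,
    # while threads/blocks are supplied at launch time
    kernel = @cuda launch=false fastmath=true scale_kernel(x)
    kernel(x; threads=length(x))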
test/core/codegen.jl (26 changes: 26 additions & 0 deletions)
@@ -157,6 +157,32 @@ end
     @test !occursin(".local", asm)
 end

+@testset "fastmath" begin
+    function div_kernel(x)
+        i = threadIdx().x
+        @fastmath @inbounds x[i] = 1 / x[i]
+        return
+    end
+
+    asm = sprint(io->CUDA.code_ptx(io, div_kernel, Tuple{CuDeviceArray{Float32,1,AS.Global}}; fastmath=true))
+    @test occursin("div.approx.ftz", asm)
+
+    # libdevice only contains fast math versions of sqrt for CUDA 11.1+
+    if CUDA.runtime_version() >= v"11.1"
+        function sqrt_kernel(x)
+            i = threadIdx().x
+            @inbounds x[i] = sqrt(x[i])
+            return
+        end
+
+        asm = sprint(io->CUDA.code_ptx(io, sqrt_kernel, Tuple{CuDeviceArray{Float32,1,AS.Global}}))
+        @test occursin("sqrt.r", asm)
+
+        asm = sprint(io->CUDA.code_ptx(io, sqrt_kernel, Tuple{CuDeviceArray{Float32,1,AS.Global}}; fastmath=true))
+        @test occursin("sqrt.approx.ftz", asm)
+    end
+end
+
 end

 ############################################################################################
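The assertions above can also be reproduced interactively: `CUDA.@device_code_ptx` prints the PTX of every kernel compiled within the expression. A sketch (thread count arbitrary, `div_kernel` as defined in the new test):

    using CUDA

    function div_kernel(x)
        i = threadIdx().x
        @fastmath @inbounds x[i] = 1 / x[i]
        return
    end

    x = CUDA.rand(Float32, 32)
    # with fastmath=true the division should lower to div.approx.ftz.f32
    # rather than the IEEE-accurate div.rn.f32
    CUDA.@device_code_ptx @cuda threads=32 fastmath=true div_kernel(x)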
