From 708cd664fc6b4e42ebead26ad7e5079cada023b3 Mon Sep 17 00:00:00 2001 From: Lucian Radu Teodorescu Date: Mon, 2 Dec 2024 16:54:33 +0200 Subject: [PATCH 01/12] Add intrinsic support for atomic store and load. Also add `void` intrinsic type. --- Sources/CodeGen/LLVM/ConcreteTypeLayout.swift | 2 ++ Sources/CodeGen/LLVM/TypeLowering.swift | 2 ++ Sources/FrontEnd/NativeInstruction.swift | 36 +++++++++++++++++++ Sources/FrontEnd/Types/BuiltinType.swift | 7 ++++ Sources/IR/Mangling/Demangler.swift | 2 ++ Sources/IR/Mangling/Mangler.swift | 2 ++ Sources/IR/Mangling/ManglingOperator.swift | 2 ++ 7 files changed, 53 insertions(+) diff --git a/Sources/CodeGen/LLVM/ConcreteTypeLayout.swift b/Sources/CodeGen/LLVM/ConcreteTypeLayout.swift index 3be0fa122..ea21b848f 100644 --- a/Sources/CodeGen/LLVM/ConcreteTypeLayout.swift +++ b/Sources/CodeGen/LLVM/ConcreteTypeLayout.swift @@ -71,6 +71,8 @@ struct ConcreteTypeLayout { self.init(size: 8, alignment: 8) case .float128: self.init(size: 16, alignment: 8) + case .void: + notLLVMRepresentable(^t) // Cannot be used in a type layout. case .module: notLLVMRepresentable(^t) } diff --git a/Sources/CodeGen/LLVM/TypeLowering.swift b/Sources/CodeGen/LLVM/TypeLowering.swift index 2af3bacf4..797ebb529 100644 --- a/Sources/CodeGen/LLVM/TypeLowering.swift +++ b/Sources/CodeGen/LLVM/TypeLowering.swift @@ -74,6 +74,8 @@ extension IR.Program { return SwiftyLLVM.FloatingPointType.fp128(in: &module) case .ptr: return module.ptr + case .void: + return SwiftyLLVM.VoidType(in: &module) case .module: notLLVMRepresentable(t) } diff --git a/Sources/FrontEnd/NativeInstruction.swift b/Sources/FrontEnd/NativeInstruction.swift index 5435e70ab..505d56e19 100644 --- a/Sources/FrontEnd/NativeInstruction.swift +++ b/Sources/FrontEnd/NativeInstruction.swift @@ -118,6 +118,18 @@ public enum NativeInstruction: Hashable { // Corresponding LLVM instruction: get_elementptr_inbounds. 
case advancedByBytes(byteOffset: BuiltinType) + case atomic_store_relaxed(BuiltinType) + + case atomic_store_release(BuiltinType) + + case atomic_store_seqcst(BuiltinType) + + case atomic_load_relaxed(BuiltinType) + + case atomic_load_acquire(BuiltinType) + + case atomic_load_seqcst(BuiltinType) + /// The parameters of a floating-point LLVM instruction. public struct MathFlags: OptionSet, Hashable { @@ -238,6 +250,18 @@ extension NativeInstruction { return .init(to: ^t) case .advancedByBytes(let byteOffset): return .init(.builtin(.ptr), ^byteOffset, to: .builtin(.ptr)) + case .atomic_store_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: .builtin(.void)) + case .atomic_store_release(let t): + return .init(.builtin(.ptr), ^t, to: .builtin(.void)) + case .atomic_store_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: .builtin(.void)) + case .atomic_load_relaxed(let t): + return .init(.builtin(.ptr), to: ^t) + case .atomic_load_acquire(let t): + return .init(.builtin(.ptr), to: ^t) + case .atomic_load_seqcst(let t): + return .init(.builtin(.ptr), to: ^t) } } @@ -331,6 +355,18 @@ extension NativeInstruction: CustomStringConvertible { return "zeroinitializer_\(t)" case .advancedByBytes(let t): return "advanced_by_bytes_\(t)" + case .atomic_store_relaxed(let t): + return "atomic_store_relaxed_\(t)" + case .atomic_store_release(let t): + return "atomic_store_release_\(t)" + case .atomic_store_seqcst(let t): + return "atomic_store_seqcst_\(t)" + case .atomic_load_relaxed(let t): + return "atomic_load_relaxed_\(t)" + case .atomic_load_acquire(let t): + return "atomic_load_acquire_\(t)" + case .atomic_load_seqcst(let t): + return "atomic_load_seqcst_\(t)" } } diff --git a/Sources/FrontEnd/Types/BuiltinType.swift b/Sources/FrontEnd/Types/BuiltinType.swift index 95581f214..e5ae0dc16 100644 --- a/Sources/FrontEnd/Types/BuiltinType.swift +++ b/Sources/FrontEnd/Types/BuiltinType.swift @@ -25,6 +25,9 @@ public enum BuiltinType: TypeProtocol { /// A built-in opaque 
pointer. case ptr + /// A built-in void type. + case void + /// The type of the built-in module. case module @@ -63,6 +66,8 @@ extension BuiltinType: CustomStringConvertible { return "float128" case .ptr: return "ptr" + case .void: + return "void" case .module: return "Builtin" } @@ -86,6 +91,8 @@ extension BuiltinType: LosslessStringConvertible { self = .float128 case "ptr": self = .ptr + case "void": + self = .void case "Builtin": self = .module diff --git a/Sources/IR/Mangling/Demangler.swift b/Sources/IR/Mangling/Demangler.swift index 47a4a44c8..c32a6be69 100644 --- a/Sources/IR/Mangling/Demangler.swift +++ b/Sources/IR/Mangling/Demangler.swift @@ -42,6 +42,8 @@ struct Demangler { demangled = takeBuiltinFloatType(from: &stream) case .builtinPointerType: demangled = .type(.builtin(.ptr)) + case .builtinVoidType: + demangled = .type(.builtin(.void)) case .builtinModuleType: demangled = .type(.builtin(.module)) case .builtinWordType: diff --git a/Sources/IR/Mangling/Mangler.swift b/Sources/IR/Mangling/Mangler.swift index 8715d5a6d..9609fcdb7 100644 --- a/Sources/IR/Mangling/Mangler.swift +++ b/Sources/IR/Mangling/Mangler.swift @@ -560,6 +560,8 @@ struct Mangler { append(integer: 128, to: &output) case .ptr: append(operator: .builtinPointerType, to: &output) + case .void: + append(operator: .builtinVoidType, to: &output) case .module: append(operator: .builtinModuleType, to: &output) } diff --git a/Sources/IR/Mangling/ManglingOperator.swift b/Sources/IR/Mangling/ManglingOperator.swift index 337ba7417..127324a44 100644 --- a/Sources/IR/Mangling/ManglingOperator.swift +++ b/Sources/IR/Mangling/ManglingOperator.swift @@ -72,6 +72,8 @@ public enum ManglingOperator: String { case builtinPointerType = "bpT" + case builtinVoidType = "bvT" + case builtinModuleType = "bmT" case builtinWordType = "bwT" From b67d41f9fa3d74c1df50f994df0ee23e6e617e95 Mon Sep 17 00:00:00 2001 From: Lucian Radu Teodorescu Date: Tue, 3 Dec 2024 19:46:58 +0200 Subject: [PATCH 02/12] Also add atomic 
native function support in the frontend parser. --- Sources/FrontEnd/BuiltinFunction.swift | 31 ++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/Sources/FrontEnd/BuiltinFunction.swift b/Sources/FrontEnd/BuiltinFunction.swift index 976d3e4f6..41635bf9f 100644 --- a/Sources/FrontEnd/BuiltinFunction.swift +++ b/Sources/FrontEnd/BuiltinFunction.swift @@ -258,6 +258,31 @@ extension BuiltinFunction { guard let t = builtinType(&tokens) else { return nil } self = .init(name: .llvm(.zeroinitializer(t))) + case "atomic": + self.init(atomic: n) + + default: + return nil + } + } + + /// Creates an atomic built-in function named `n` or returns `nil` if `n` isn't a valid atomic builtin name. + private init?(atomic n: String) { + guard let (fs, ts) = splitLastUnderscore(n) else { return nil } + guard let t = BuiltinType.init(ts) else { return nil } + switch fs { + case "atomic_store_relaxed": + self = .init(name: .llvm(.atomic_store_relaxed(t))) + case "atomic_store_release": + self = .init(name: .llvm(.atomic_store_release(t))) + case "atomic_store_seqcst": + self = .init(name: .llvm(.atomic_store_seqcst(t))) + case "atomic_load_relaxed": + self = .init(name: .llvm(.atomic_load_relaxed(t))) + case "atomic_load_acquire": + self = .init(name: .llvm(.atomic_load_acquire(t))) + case "atomic_load_seqcst": + self = .init(name: .llvm(.atomic_load_seqcst(t))) default: return nil } @@ -335,6 +360,12 @@ private func take( } } +/// Splits `s` into a pair `(prefix, suffix)` at the last underscore character, or returns `nil`. +private func splitLastUnderscore(_ s: String) -> (String, String)? { + guard let i = s.lastIndex(of: "_") else { return nil } + return (String(s[..<i]), String(s[s.index(after: i)...])) +} + /// Returns a built-in type parsed from `stream`. private func builtinType(_ stream: inout ArraySlice<Substring>) -> BuiltinType? { stream.popFirst().flatMap(BuiltinType.init(_:)) From c4a410c618b9e32432360f7b5e3cfbf040986c5e Mon Sep 17 00:00:00 2001 From: Lucian Radu Teodorescu Date: Fri, 6 Dec 2024 22:06:42 +0200 Subject: [PATCH 03/12] Add LLVM transpilation for atomics.
This is dependent on a Swifty-LLVM change to add support for atomic ordering. --- Sources/CodeGen/LLVM/Transpilation.swift | 39 ++++++++++++++++++++++++ Sources/FrontEnd/NativeInstruction.swift | 6 ++-- 2 files changed, 42 insertions(+), 3 deletions(-) diff --git a/Sources/CodeGen/LLVM/Transpilation.swift b/Sources/CodeGen/LLVM/Transpilation.swift index d75d90e5e..91167c932 100644 --- a/Sources/CodeGen/LLVM/Transpilation.swift +++ b/Sources/CodeGen/LLVM/Transpilation.swift @@ -1126,6 +1126,45 @@ extension SwiftyLLVM.Module { register[.register(i)] = insertGetElementPointerInBounds( of: base, typed: i8, indices: [byteOffset], at: insertionPoint) + case .atomic_load_relaxed: + let source = llvm(s.operands[0]) + let l = insertLoad(ptr, from: source, at: insertionPoint) + setOrdering(.monotonic, for: l) + register[.register(i)] = l + + case .atomic_load_acquire: + let source = llvm(s.operands[0]) + let l = insertLoad(ptr, from: source, at: insertionPoint) + setOrdering(.acquire, for: l) + register[.register(i)] = l + + case .atomic_load_seqcst: + let source = llvm(s.operands[0]) + let l = insertLoad(ptr, from: source, at: insertionPoint) + setOrdering(.sequentiallyConsistent, for: l) + register[.register(i)] = l + + case .atomic_store_relaxed: + let target = llvm(s.operands[0]) + let value = llvm(s.operands[1]) + let s = insertStore(value, to: target, at: insertionPoint) + setOrdering(.monotonic, for: s) + register[.register(i)] = target + + case .atomic_store_release: + let target = llvm(s.operands[0]) + let value = llvm(s.operands[1]) + let s = insertStore(value, to: target, at: insertionPoint) + setOrdering(.release, for: s) + register[.register(i)] = target + + case .atomic_store_seqcst: + let target = llvm(s.operands[0]) + let value = llvm(s.operands[1]) + let s = insertStore(value, to: target, at: insertionPoint) + setOrdering(.sequentiallyConsistent, for: s) + register[.register(i)] = target + default: unreachable("unexpected LLVM instruction 
'\(s.instruction)'") } diff --git a/Sources/FrontEnd/NativeInstruction.swift b/Sources/FrontEnd/NativeInstruction.swift index 505d56e19..b269163f6 100644 --- a/Sources/FrontEnd/NativeInstruction.swift +++ b/Sources/FrontEnd/NativeInstruction.swift @@ -251,11 +251,11 @@ extension NativeInstruction { case .advancedByBytes(let byteOffset): return .init(.builtin(.ptr), ^byteOffset, to: .builtin(.ptr)) case .atomic_store_relaxed(let t): - return .init(.builtin(.ptr), ^t, to: .builtin(.void)) + return .init(.builtin(.ptr), ^t, to: .builtin(.ptr)) case .atomic_store_release(let t): - return .init(.builtin(.ptr), ^t, to: .builtin(.void)) + return .init(.builtin(.ptr), ^t, to: .builtin(.ptr)) case .atomic_store_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: .builtin(.void)) + return .init(.builtin(.ptr), ^t, to: .builtin(.ptr)) case .atomic_load_relaxed(let t): return .init(.builtin(.ptr), to: ^t) case .atomic_load_acquire(let t): From f20f8723df7036bfadf03e2198340e4dc05a7092 Mon Sep 17 00:00:00 2001 From: Lucian Radu Teodorescu Date: Fri, 6 Dec 2024 22:18:37 +0200 Subject: [PATCH 04/12] Revert adding builtin void type --- Sources/CodeGen/LLVM/ConcreteTypeLayout.swift | 2 -- Sources/CodeGen/LLVM/TypeLowering.swift | 2 -- Sources/FrontEnd/Types/BuiltinType.swift | 7 ------- Sources/IR/Mangling/Demangler.swift | 2 -- Sources/IR/Mangling/Mangler.swift | 2 -- Sources/IR/Mangling/ManglingOperator.swift | 2 -- 6 files changed, 17 deletions(-) diff --git a/Sources/CodeGen/LLVM/ConcreteTypeLayout.swift b/Sources/CodeGen/LLVM/ConcreteTypeLayout.swift index ea21b848f..3be0fa122 100644 --- a/Sources/CodeGen/LLVM/ConcreteTypeLayout.swift +++ b/Sources/CodeGen/LLVM/ConcreteTypeLayout.swift @@ -71,8 +71,6 @@ struct ConcreteTypeLayout { self.init(size: 8, alignment: 8) case .float128: self.init(size: 16, alignment: 8) - case .void: - notLLVMRepresentable(^t) // Cannot be used in a type layout. 
case .module: notLLVMRepresentable(^t) } diff --git a/Sources/CodeGen/LLVM/TypeLowering.swift b/Sources/CodeGen/LLVM/TypeLowering.swift index 797ebb529..2af3bacf4 100644 --- a/Sources/CodeGen/LLVM/TypeLowering.swift +++ b/Sources/CodeGen/LLVM/TypeLowering.swift @@ -74,8 +74,6 @@ extension IR.Program { return SwiftyLLVM.FloatingPointType.fp128(in: &module) case .ptr: return module.ptr - case .void: - return SwiftyLLVM.VoidType(in: &module) case .module: notLLVMRepresentable(t) } diff --git a/Sources/FrontEnd/Types/BuiltinType.swift b/Sources/FrontEnd/Types/BuiltinType.swift index e5ae0dc16..95581f214 100644 --- a/Sources/FrontEnd/Types/BuiltinType.swift +++ b/Sources/FrontEnd/Types/BuiltinType.swift @@ -25,9 +25,6 @@ public enum BuiltinType: TypeProtocol { /// A built-in opaque pointer. case ptr - /// A built-in void type. - case void - /// The type of the built-in module. case module @@ -66,8 +63,6 @@ extension BuiltinType: CustomStringConvertible { return "float128" case .ptr: return "ptr" - case .void: - return "void" case .module: return "Builtin" } @@ -91,8 +86,6 @@ extension BuiltinType: LosslessStringConvertible { self = .float128 case "ptr": self = .ptr - case "void": - self = .void case "Builtin": self = .module diff --git a/Sources/IR/Mangling/Demangler.swift b/Sources/IR/Mangling/Demangler.swift index c32a6be69..47a4a44c8 100644 --- a/Sources/IR/Mangling/Demangler.swift +++ b/Sources/IR/Mangling/Demangler.swift @@ -42,8 +42,6 @@ struct Demangler { demangled = takeBuiltinFloatType(from: &stream) case .builtinPointerType: demangled = .type(.builtin(.ptr)) - case .builtinVoidType: - demangled = .type(.builtin(.void)) case .builtinModuleType: demangled = .type(.builtin(.module)) case .builtinWordType: diff --git a/Sources/IR/Mangling/Mangler.swift b/Sources/IR/Mangling/Mangler.swift index 9609fcdb7..8715d5a6d 100644 --- a/Sources/IR/Mangling/Mangler.swift +++ b/Sources/IR/Mangling/Mangler.swift @@ -560,8 +560,6 @@ struct Mangler { append(integer: 128, to: 
&output) case .ptr: append(operator: .builtinPointerType, to: &output) - case .void: - append(operator: .builtinVoidType, to: &output) case .module: append(operator: .builtinModuleType, to: &output) } diff --git a/Sources/IR/Mangling/ManglingOperator.swift b/Sources/IR/Mangling/ManglingOperator.swift index 127324a44..337ba7417 100644 --- a/Sources/IR/Mangling/ManglingOperator.swift +++ b/Sources/IR/Mangling/ManglingOperator.swift @@ -72,8 +72,6 @@ public enum ManglingOperator: String { case builtinPointerType = "bpT" - case builtinVoidType = "bvT" - case builtinModuleType = "bmT" case builtinWordType = "bwT" From 71c6d2a718890f01303277a3db4b591a846140ea Mon Sep 17 00:00:00 2001 From: Lucian Radu Teodorescu Date: Sat, 7 Dec 2024 10:22:34 +0200 Subject: [PATCH 05/12] Add unit tests --- Tests/HyloTests/BuiltinFunctionTests.swift | 24 ++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/Tests/HyloTests/BuiltinFunctionTests.swift b/Tests/HyloTests/BuiltinFunctionTests.swift index 233f883c3..0c3b95e5d 100644 --- a/Tests/HyloTests/BuiltinFunctionTests.swift +++ b/Tests/HyloTests/BuiltinFunctionTests.swift @@ -206,6 +206,30 @@ final class BuiltinFunctionTests: XCTestCase { createInstanceWithType: expectedType) } + func testAtomicLoad() throws { + let expectedType = ArrowType(.builtin(.ptr), to: .builtin(.i(64))) + try assertParse( + instructions: ["atomic_load"], + parameterizedBy: [ + ["relaxed", "i64"], + ["acquire", "i64"], + ["seqcst", "i64"] + ], + createInstanceWithType: expectedType) + } + + func testAtomicStore() throws { + let expectedType = ArrowType(.builtin(.ptr), .builtin(.i(64)), to: .builtin(.ptr)) + try assertParse( + instructions: ["atomic_store"], + parameterizedBy: [ + ["relaxed", "i64"], + ["release", "i64"], + ["seqcst", "i64"] + ], + createInstanceWithType: expectedType) + } + /// For each element in `instructions` and `parameters`, assert that parsing a built-in functions /// named after their concatenation creates an instance with 
the same stem and parameters, and /// whose type is `expectedType`. From 6834ce5a5555088ce0d1ced10d1c9c31c35cc19f Mon Sep 17 00:00:00 2001 From: Lucian Radu Teodorescu Date: Sun, 8 Dec 2024 17:49:50 +0200 Subject: [PATCH 06/12] Add extra atomic operations, beside load and store. --- Sources/CodeGen/LLVM/Transpilation.swift | 371 +++++++++++++ Sources/FrontEnd/BuiltinFunction.swift | 270 +++++++++- Sources/FrontEnd/NativeInstruction.swift | 594 ++++++++++++++++++++- Tests/HyloTests/BuiltinFunctionTests.swift | 104 ++++ 4 files changed, 1312 insertions(+), 27 deletions(-) diff --git a/Sources/CodeGen/LLVM/Transpilation.swift b/Sources/CodeGen/LLVM/Transpilation.swift index 91167c932..19b594d1d 100644 --- a/Sources/CodeGen/LLVM/Transpilation.swift +++ b/Sources/CodeGen/LLVM/Transpilation.swift @@ -1165,11 +1165,382 @@ extension SwiftyLLVM.Module { setOrdering(.sequentiallyConsistent, for: s) register[.register(i)] = target + case .atomic_swap_relaxed: + insert(atomicRMW: .xchg, ordering: .monotonic, for: i) + + case .atomic_swap_acquire: + insert(atomicRMW: .xchg, ordering: .acquire, for: i) + + case .atomic_swap_release: + insert(atomicRMW: .xchg, ordering: .release, for: i) + + case .atomic_swap_acqrel: + insert(atomicRMW: .xchg, ordering: .acquireRelease, for: i) + + case .atomic_swap_seqcst: + insert(atomicRMW: .xchg, ordering: .sequentiallyConsistent, for: i) + + case .atomic_add_relaxed: + insert(atomicRMW: .add, ordering: .monotonic, for: i) + + case .atomic_add_acquire: + insert(atomicRMW: .add, ordering: .acquire, for: i) + + case .atomic_add_release: + insert(atomicRMW: .add, ordering: .release, for: i) + + case .atomic_add_acqrel: + insert(atomicRMW: .add, ordering: .acquireRelease, for: i) + + case .atomic_add_seqcst: + insert(atomicRMW: .add, ordering: .sequentiallyConsistent, for: i) + + case .atomic_fadd_relaxed: + insert(atomicRMW: .fAdd, ordering: .monotonic, for: i) + + case .atomic_fadd_acquire: + insert(atomicRMW: .fAdd, ordering: .acquire, for: i) 
+ + case .atomic_fadd_release: + insert(atomicRMW: .fAdd, ordering: .release, for: i) + + case .atomic_fadd_acqrel: + insert(atomicRMW: .fAdd, ordering: .acquireRelease, for: i) + + case .atomic_fadd_seqcst: + insert(atomicRMW: .fAdd, ordering: .sequentiallyConsistent, for: i) + + case .atomic_sub_relaxed: + insert(atomicRMW: .sub, ordering: .monotonic, for: i) + + case .atomic_sub_acquire: + insert(atomicRMW: .sub, ordering: .acquire, for: i) + + case .atomic_sub_release: + insert(atomicRMW: .sub, ordering: .release, for: i) + + case .atomic_sub_acqrel: + insert(atomicRMW: .sub, ordering: .acquireRelease, for: i) + + case .atomic_sub_seqcst: + insert(atomicRMW: .sub, ordering: .sequentiallyConsistent, for: i) + + case .atomic_fsub_relaxed: + insert(atomicRMW: .fSub, ordering: .monotonic, for: i) + + case .atomic_fsub_acquire: + insert(atomicRMW: .fSub, ordering: .acquire, for: i) + + case .atomic_fsub_release: + insert(atomicRMW: .fSub, ordering: .release, for: i) + + case .atomic_fsub_acqrel: + insert(atomicRMW: .fSub, ordering: .acquireRelease, for: i) + + case .atomic_fsub_seqcst: + insert(atomicRMW: .fSub, ordering: .sequentiallyConsistent, for: i) + + case .atomic_max_relaxed: + insert(atomicRMW: .max, ordering: .monotonic, for: i) + + case .atomic_max_acquire: + insert(atomicRMW: .max, ordering: .acquire, for: i) + + case .atomic_max_release: + insert(atomicRMW: .max, ordering: .release, for: i) + + case .atomic_max_acqrel: + insert(atomicRMW: .max, ordering: .acquireRelease, for: i) + + case .atomic_max_seqcst: + insert(atomicRMW: .max, ordering: .sequentiallyConsistent, for: i) + + case .atomic_umax_relaxed: + insert(atomicRMW: .uMax, ordering: .monotonic, for: i) + + case .atomic_umax_acquire: + insert(atomicRMW: .uMax, ordering: .acquire, for: i) + + case .atomic_umax_release: + insert(atomicRMW: .uMax, ordering: .release, for: i) + + case .atomic_umax_acqrel: + insert(atomicRMW: .uMax, ordering: .acquireRelease, for: i) + + case .atomic_umax_seqcst: + 
insert(atomicRMW: .uMax, ordering: .sequentiallyConsistent, for: i) + + case .atomic_fmax_relaxed: + insert(atomicRMW: .fMax, ordering: .monotonic, for: i) + + case .atomic_fmax_acquire: + insert(atomicRMW: .fMax, ordering: .acquire, for: i) + + case .atomic_fmax_release: + insert(atomicRMW: .fMax, ordering: .release, for: i) + + case .atomic_fmax_acqrel: + insert(atomicRMW: .fMax, ordering: .acquireRelease, for: i) + + case .atomic_fmax_seqcst: + insert(atomicRMW: .fMax, ordering: .sequentiallyConsistent, for: i) + + case .atomic_min_relaxed: + insert(atomicRMW: .min, ordering: .monotonic, for: i) + + case .atomic_min_acquire: + insert(atomicRMW: .min, ordering: .acquire, for: i) + + case .atomic_min_release: + insert(atomicRMW: .min, ordering: .release, for: i) + + case .atomic_min_acqrel: + insert(atomicRMW: .min, ordering: .acquireRelease, for: i) + + case .atomic_min_seqcst: + insert(atomicRMW: .min, ordering: .sequentiallyConsistent, for: i) + + case .atomic_umin_relaxed: + insert(atomicRMW: .uMin, ordering: .monotonic, for: i) + + case .atomic_umin_acquire: + insert(atomicRMW: .uMin, ordering: .acquire, for: i) + + case .atomic_umin_release: + insert(atomicRMW: .uMin, ordering: .release, for: i) + + case .atomic_umin_acqrel: + insert(atomicRMW: .uMin, ordering: .acquireRelease, for: i) + + case .atomic_umin_seqcst: + insert(atomicRMW: .uMin, ordering: .sequentiallyConsistent, for: i) + + case .atomic_fmin_relaxed: + insert(atomicRMW: .fMin, ordering: .monotonic, for: i) + + case .atomic_fmin_acquire: + insert(atomicRMW: .fMin, ordering: .acquire, for: i) + + case .atomic_fmin_release: + insert(atomicRMW: .fMin, ordering: .release, for: i) + + case .atomic_fmin_acqrel: + insert(atomicRMW: .fMin, ordering: .acquireRelease, for: i) + + case .atomic_fmin_seqcst: + insert(atomicRMW: .fMin, ordering: .sequentiallyConsistent, for: i) + + case .atomic_and_relaxed: + insert(atomicRMW: .and, ordering: .monotonic, for: i) + + case .atomic_and_acquire: + 
insert(atomicRMW: .and, ordering: .acquire, for: i) + + case .atomic_and_release: + insert(atomicRMW: .and, ordering: .release, for: i) + + case .atomic_and_acqrel: + insert(atomicRMW: .and, ordering: .acquireRelease, for: i) + + case .atomic_and_seqcst: + insert(atomicRMW: .and, ordering: .sequentiallyConsistent, for: i) + + case .atomic_nand_relaxed: + insert(atomicRMW: .nand, ordering: .monotonic, for: i) + + case .atomic_nand_acquire: + insert(atomicRMW: .nand, ordering: .acquire, for: i) + + case .atomic_nand_release: + insert(atomicRMW: .nand, ordering: .release, for: i) + + case .atomic_nand_acqrel: + insert(atomicRMW: .nand, ordering: .acquireRelease, for: i) + + case .atomic_nand_seqcst: + insert(atomicRMW: .nand, ordering: .sequentiallyConsistent, for: i) + + case .atomic_or_relaxed: + insert(atomicRMW: .or, ordering: .monotonic, for: i) + + case .atomic_or_acquire: + insert(atomicRMW: .or, ordering: .acquire, for: i) + + case .atomic_or_release: + insert(atomicRMW: .or, ordering: .release, for: i) + + case .atomic_or_acqrel: + insert(atomicRMW: .or, ordering: .acquireRelease, for: i) + + case .atomic_or_seqcst: + insert(atomicRMW: .or, ordering: .sequentiallyConsistent, for: i) + + case .atomic_xor_relaxed: + insert(atomicRMW: .xor, ordering: .monotonic, for: i) + + case .atomic_xor_acquire: + insert(atomicRMW: .xor, ordering: .acquire, for: i) + + case .atomic_xor_release: + insert(atomicRMW: .xor, ordering: .release, for: i) + + case .atomic_xor_acqrel: + insert(atomicRMW: .xor, ordering: .acquireRelease, for: i) + + case .atomic_xor_seqcst: + insert(atomicRMW: .xor, ordering: .sequentiallyConsistent, for: i) + + case .atomic_cmpxchg_relaxed_relaxed: + insertAtomicCompareExchange(successOrdering: .monotonic, failureOrdering: .monotonic, weak: false, for: i) + + case .atomic_cmpxchg_relaxed_acquire: + insertAtomicCompareExchange(successOrdering: .monotonic, failureOrdering: .acquire, weak: false, for: i) + + case .atomic_cmpxchg_relaxed_seqcst: + 
insertAtomicCompareExchange(successOrdering: .monotonic, failureOrdering: .sequentiallyConsistent, weak: false, for: i) + + case .atomic_cmpxchg_acquire_relaxed: + insertAtomicCompareExchange(successOrdering: .acquire, failureOrdering: .monotonic, weak: false, for: i) + + case .atomic_cmpxchg_acquire_acquire: + insertAtomicCompareExchange(successOrdering: .acquire, failureOrdering: .acquire, weak: false, for: i) + + case .atomic_cmpxchg_acquire_seqcst: + insertAtomicCompareExchange(successOrdering: .acquire, failureOrdering: .sequentiallyConsistent, weak: false, for: i) + + case .atomic_cmpxchg_release_relaxed: + insertAtomicCompareExchange(successOrdering: .release, failureOrdering: .monotonic, weak: false, for: i) + + case .atomic_cmpxchg_release_acquire: + insertAtomicCompareExchange(successOrdering: .release, failureOrdering: .acquire, weak: false, for: i) + + case .atomic_cmpxchg_release_seqcst: + insertAtomicCompareExchange(successOrdering: .release, failureOrdering: .sequentiallyConsistent, weak: false, for: i) + + case .atomic_cmpxchg_acqrel_relaxed: + insertAtomicCompareExchange(successOrdering: .acquireRelease, failureOrdering: .monotonic, weak: false, for: i) + + case .atomic_cmpxchg_acqrel_acquire: + insertAtomicCompareExchange(successOrdering: .acquireRelease, failureOrdering: .acquire, weak: false, for: i) + + case .atomic_cmpxchg_acqrel_seqcst: + insertAtomicCompareExchange(successOrdering: .acquireRelease, failureOrdering: .sequentiallyConsistent, weak: false, for: i) + + case .atomic_cmpxchg_seqcst_relaxed: + insertAtomicCompareExchange(successOrdering: .sequentiallyConsistent, failureOrdering: .monotonic, weak: false, for: i) + + case .atomic_cmpxchg_seqcst_acquire: + insertAtomicCompareExchange(successOrdering: .sequentiallyConsistent, failureOrdering: .acquire, weak: false, for: i) + + case .atomic_cmpxchg_seqcst_seqcst: + insertAtomicCompareExchange(successOrdering: .sequentiallyConsistent, failureOrdering: .sequentiallyConsistent, weak: false, 
for: i) + + case .atomic_cmpxchgweak_relaxed_relaxed: + insertAtomicCompareExchange(successOrdering: .monotonic, failureOrdering: .monotonic, weak: true, for: i) + + case .atomic_cmpxchgweak_relaxed_acquire: + insertAtomicCompareExchange(successOrdering: .monotonic, failureOrdering: .acquire, weak: true, for: i) + + case .atomic_cmpxchgweak_relaxed_seqcst: + insertAtomicCompareExchange(successOrdering: .monotonic, failureOrdering: .sequentiallyConsistent, weak: true, for: i) + + case .atomic_cmpxchgweak_acquire_relaxed: + insertAtomicCompareExchange(successOrdering: .acquire, failureOrdering: .monotonic, weak: true, for: i) + + case .atomic_cmpxchgweak_acquire_acquire: + insertAtomicCompareExchange(successOrdering: .acquire, failureOrdering: .acquire, weak: true, for: i) + + case .atomic_cmpxchgweak_acquire_seqcst: + insertAtomicCompareExchange(successOrdering: .acquire, failureOrdering: .sequentiallyConsistent, weak: true, for: i) + + case .atomic_cmpxchgweak_release_relaxed: + insertAtomicCompareExchange(successOrdering: .release, failureOrdering: .monotonic, weak: true, for: i) + + case .atomic_cmpxchgweak_release_acquire: + insertAtomicCompareExchange(successOrdering: .release, failureOrdering: .acquire, weak: true, for: i) + + case .atomic_cmpxchgweak_release_seqcst: + insertAtomicCompareExchange(successOrdering: .release, failureOrdering: .sequentiallyConsistent, weak: true, for: i) + + case .atomic_cmpxchgweak_acqrel_relaxed: + insertAtomicCompareExchange(successOrdering: .acquireRelease, failureOrdering: .monotonic, weak: true, for: i) + + case .atomic_cmpxchgweak_acqrel_acquire: + insertAtomicCompareExchange(successOrdering: .acquireRelease, failureOrdering: .acquire, weak: true, for: i) + + case .atomic_cmpxchgweak_acqrel_seqcst: + insertAtomicCompareExchange(successOrdering: .acquireRelease, failureOrdering: .sequentiallyConsistent, weak: true, for: i) + + case .atomic_cmpxchgweak_seqcst_relaxed: + insertAtomicCompareExchange(successOrdering: 
.sequentiallyConsistent, failureOrdering: .monotonic, weak: true, for: i) + + case .atomic_cmpxchgweak_seqcst_acquire: + insertAtomicCompareExchange(successOrdering: .sequentiallyConsistent, failureOrdering: .acquire, weak: true, for: i) + + case .atomic_cmpxchgweak_seqcst_seqcst: + insertAtomicCompareExchange(successOrdering: .sequentiallyConsistent, failureOrdering: .sequentiallyConsistent, weak: true, for: i) + + case .atomic_fence_acquire: + insertAtomicFence(.acquire, singleThread: false, for: i) + + case .atomic_fence_release: + insertAtomicFence(.release, singleThread: false, for: i) + + case .atomic_fence_acqrel: + insertAtomicFence(.acquireRelease, singleThread: false, for: i) + case .atomic_fence_seqcst: + insertAtomicFence(.sequentiallyConsistent, singleThread: false, for: i) + + case .atomic_singlethreadfence_acquire: + insertAtomicFence(.acquire, singleThread: true, for: i) + + case .atomic_singlethreadfence_release: + insertAtomicFence(.release, singleThread: true, for: i) + + case .atomic_singlethreadfence_acqrel: + insertAtomicFence(.acquireRelease, singleThread: true, for: i) + + case .atomic_singlethreadfence_seqcst: + insertAtomicFence(.sequentiallyConsistent, singleThread: true, for: i) + default: unreachable("unexpected LLVM instruction '\(s.instruction)'") } } + /// Inserts the transpilation of `i`, an atomic read-modify-write applying `oper` with `ordering`, at `insertionPoint`. + func insert(atomicRMW oper: AtomicRMWBinOp, ordering: AtomicOrdering, for i: IR.InstructionID) { + let s = context.source[i] as! IR.LLVMInstruction + let target = llvm(s.operands[0]) + let value = llvm(s.operands[1]) + let o = insertAtomicRMW(target, operation: oper, value: value, ordering: ordering, singleThread: false, at: insertionPoint) + register[.register(i)] = o + } + + /// Inserts the transpilation of `i` at `insertionPoint`.
+ func insertAtomicCompareExchange(successOrdering: AtomicOrdering, failureOrdering: AtomicOrdering, weak: Bool, for i: IR.InstructionID) { + let s = context.source[i] as! IR.LLVMInstruction + let target = llvm(s.operands[0]) + let old = llvm(s.operands[1]) + let new = llvm(s.operands[2]) + let o = insertAtomicCmpXchg( + target, + old: old, + new: new, + successOrdering: successOrdering, + failureOrdering: failureOrdering, + weak: weak, + singleThread: false, + at: insertionPoint) + register[.register(i)] = o + } + + /// Inserts the transpilation of `i` at `insertionPoint`. + func insertAtomicFence(_ ordering: AtomicOrdering, singleThread: Bool, for i: IR.InstructionID) { + insertFence(ordering, singleThread: singleThread, at: insertionPoint) + register[.register(i)] = ptr.null + } + /// Inserts the transpilation of `i` at `insertionPoint`. func insert(load i: IR.InstructionID) { let s = context.source[i] as! Load diff --git a/Sources/FrontEnd/BuiltinFunction.swift b/Sources/FrontEnd/BuiltinFunction.swift index 41635bf9f..b5d7aa5d9 100644 --- a/Sources/FrontEnd/BuiltinFunction.swift +++ b/Sources/FrontEnd/BuiltinFunction.swift @@ -268,23 +268,259 @@ extension BuiltinFunction { /// Creates an atomic built-in function named `n` or returns `nil` if `n` isn't a valid atomic builtin name. 
private init?(atomic n: String) { - guard let (fs, ts) = splitLastUnderscore(n) else { return nil } - guard let t = BuiltinType.init(ts) else { return nil } - switch fs { - case "atomic_store_relaxed": - self = .init(name: .llvm(.atomic_store_relaxed(t))) - case "atomic_store_release": - self = .init(name: .llvm(.atomic_store_release(t))) - case "atomic_store_seqcst": - self = .init(name: .llvm(.atomic_store_seqcst(t))) - case "atomic_load_relaxed": - self = .init(name: .llvm(.atomic_load_relaxed(t))) - case "atomic_load_acquire": - self = .init(name: .llvm(.atomic_load_acquire(t))) - case "atomic_load_seqcst": - self = .init(name: .llvm(.atomic_load_seqcst(t))) - default: - return nil + // Special case for fence instructions; we don't have a type for them. + if n.contains("fence") { + switch n { + case "atomic_fence_acquire": + self = .init(name: .llvm(.atomic_fence_acquire)) + case "atomic_fence_release": + self = .init(name: .llvm(.atomic_fence_release)) + case "atomic_fence_acqrel": + self = .init(name: .llvm(.atomic_fence_acqrel)) + case "atomic_fence_seqcst": + self = .init(name: .llvm(.atomic_fence_seqcst)) + case "atomic_singlethreadfence_acquire": + self = .init(name: .llvm(.atomic_singlethreadfence_acquire)) + case "atomic_singlethreadfence_release": + self = .init(name: .llvm(.atomic_singlethreadfence_release)) + case "atomic_singlethreadfence_acqrel": + self = .init(name: .llvm(.atomic_singlethreadfence_acqrel)) + case "atomic_singlethreadfence_seqcst": + self = .init(name: .llvm(.atomic_singlethreadfence_seqcst)) + default: + return nil + } + } + else { + // For the rest of the atomics we have a type at the end. 
+ guard let (fs, ts) = splitLastUnderscore(n) else { return nil } + guard let t = BuiltinType.init(ts) else { return nil } + switch fs { + case "atomic_store_relaxed": + self = .init(name: .llvm(.atomic_store_relaxed(t))) + case "atomic_store_release": + self = .init(name: .llvm(.atomic_store_release(t))) + case "atomic_store_seqcst": + self = .init(name: .llvm(.atomic_store_seqcst(t))) + case "atomic_load_relaxed": + self = .init(name: .llvm(.atomic_load_relaxed(t))) + case "atomic_load_acquire": + self = .init(name: .llvm(.atomic_load_acquire(t))) + case "atomic_load_seqcst": + self = .init(name: .llvm(.atomic_load_seqcst(t))) + case "atomic_swap_relaxed": + self = .init(name: .llvm(.atomic_swap_relaxed(t))) + case "atomic_swap_acquire": + self = .init(name: .llvm(.atomic_swap_acquire(t))) + case "atomic_swap_release": + self = .init(name: .llvm(.atomic_swap_release(t))) + case "atomic_swap_acqrel": + self = .init(name: .llvm(.atomic_swap_acqrel(t))) + case "atomic_swap_seqcst": + self = .init(name: .llvm(.atomic_swap_seqcst(t))) + case "atomic_add_relaxed": + self = .init(name: .llvm(.atomic_add_relaxed(t))) + case "atomic_add_acquire": + self = .init(name: .llvm(.atomic_add_acquire(t))) + case "atomic_add_release": + self = .init(name: .llvm(.atomic_add_release(t))) + case "atomic_add_acqrel": + self = .init(name: .llvm(.atomic_add_acqrel(t))) + case "atomic_add_seqcst": + self = .init(name: .llvm(.atomic_add_seqcst(t))) + case "atomic_fadd_relaxed": + self = .init(name: .llvm(.atomic_fadd_relaxed(t))) + case "atomic_fadd_acquire": + self = .init(name: .llvm(.atomic_fadd_acquire(t))) + case "atomic_fadd_release": + self = .init(name: .llvm(.atomic_fadd_release(t))) + case "atomic_fadd_acqrel": + self = .init(name: .llvm(.atomic_fadd_acqrel(t))) + case "atomic_fadd_seqcst": + self = .init(name: .llvm(.atomic_fadd_seqcst(t))) + case "atomic_sub_relaxed": + self = .init(name: .llvm(.atomic_sub_relaxed(t))) + case "atomic_sub_acquire": + self = .init(name: 
.llvm(.atomic_sub_acquire(t))) + case "atomic_sub_release": + self = .init(name: .llvm(.atomic_sub_release(t))) + case "atomic_sub_acqrel": + self = .init(name: .llvm(.atomic_sub_acqrel(t))) + case "atomic_sub_seqcst": + self = .init(name: .llvm(.atomic_sub_seqcst(t))) + case "atomic_fsub_relaxed": + self = .init(name: .llvm(.atomic_fsub_relaxed(t))) + case "atomic_fsub_acquire": + self = .init(name: .llvm(.atomic_fsub_acquire(t))) + case "atomic_fsub_release": + self = .init(name: .llvm(.atomic_fsub_release(t))) + case "atomic_fsub_acqrel": + self = .init(name: .llvm(.atomic_fsub_acqrel(t))) + case "atomic_fsub_seqcst": + self = .init(name: .llvm(.atomic_fsub_seqcst(t))) + case "atomic_max_relaxed": + self = .init(name: .llvm(.atomic_max_relaxed(t))) + case "atomic_max_acquire": + self = .init(name: .llvm(.atomic_max_acquire(t))) + case "atomic_max_release": + self = .init(name: .llvm(.atomic_max_release(t))) + case "atomic_max_acqrel": + self = .init(name: .llvm(.atomic_max_acqrel(t))) + case "atomic_max_seqcst": + self = .init(name: .llvm(.atomic_max_seqcst(t))) + case "atomic_umax_relaxed": + self = .init(name: .llvm(.atomic_umax_relaxed(t))) + case "atomic_umax_acquire": + self = .init(name: .llvm(.atomic_umax_acquire(t))) + case "atomic_umax_release": + self = .init(name: .llvm(.atomic_umax_release(t))) + case "atomic_umax_acqrel": + self = .init(name: .llvm(.atomic_umax_acqrel(t))) + case "atomic_umax_seqcst": + self = .init(name: .llvm(.atomic_umax_seqcst(t))) + case "atomic_fmax_relaxed": + self = .init(name: .llvm(.atomic_fmax_relaxed(t))) + case "atomic_fmax_acquire": + self = .init(name: .llvm(.atomic_fmax_acquire(t))) + case "atomic_fmax_release": + self = .init(name: .llvm(.atomic_fmax_release(t))) + case "atomic_fmax_acqrel": + self = .init(name: .llvm(.atomic_fmax_acqrel(t))) + case "atomic_fmax_seqcst": + self = .init(name: .llvm(.atomic_fmax_seqcst(t))) + case "atomic_min_relaxed": + self = .init(name: .llvm(.atomic_min_relaxed(t))) + case 
"atomic_min_acquire": + self = .init(name: .llvm(.atomic_min_acquire(t))) + case "atomic_min_release": + self = .init(name: .llvm(.atomic_min_release(t))) + case "atomic_min_acqrel": + self = .init(name: .llvm(.atomic_min_acqrel(t))) + case "atomic_min_seqcst": + self = .init(name: .llvm(.atomic_min_seqcst(t))) + case "atomic_umin_relaxed": + self = .init(name: .llvm(.atomic_umin_relaxed(t))) + case "atomic_umin_acquire": + self = .init(name: .llvm(.atomic_umin_acquire(t))) + case "atomic_umin_release": + self = .init(name: .llvm(.atomic_umin_release(t))) + case "atomic_umin_acqrel": + self = .init(name: .llvm(.atomic_umin_acqrel(t))) + case "atomic_umin_seqcst": + self = .init(name: .llvm(.atomic_umin_seqcst(t))) + case "atomic_fmin_relaxed": + self = .init(name: .llvm(.atomic_fmin_relaxed(t))) + case "atomic_fmin_acquire": + self = .init(name: .llvm(.atomic_fmin_acquire(t))) + case "atomic_fmin_release": + self = .init(name: .llvm(.atomic_fmin_release(t))) + case "atomic_fmin_acqrel": + self = .init(name: .llvm(.atomic_fmin_acqrel(t))) + case "atomic_fmin_seqcst": + self = .init(name: .llvm(.atomic_fmin_seqcst(t))) + case "atomic_and_relaxed": + self = .init(name: .llvm(.atomic_and_relaxed(t))) + case "atomic_and_acquire": + self = .init(name: .llvm(.atomic_and_acquire(t))) + case "atomic_and_release": + self = .init(name: .llvm(.atomic_and_release(t))) + case "atomic_and_acqrel": + self = .init(name: .llvm(.atomic_and_acqrel(t))) + case "atomic_and_seqcst": + self = .init(name: .llvm(.atomic_and_seqcst(t))) + case "atomic_nand_relaxed": + self = .init(name: .llvm(.atomic_nand_relaxed(t))) + case "atomic_nand_acquire": + self = .init(name: .llvm(.atomic_nand_acquire(t))) + case "atomic_nand_release": + self = .init(name: .llvm(.atomic_nand_release(t))) + case "atomic_nand_acqrel": + self = .init(name: .llvm(.atomic_nand_acqrel(t))) + case "atomic_nand_seqcst": + self = .init(name: .llvm(.atomic_nand_seqcst(t))) + case "atomic_or_relaxed": + self = .init(name: 
.llvm(.atomic_or_relaxed(t))) + case "atomic_or_acquire": + self = .init(name: .llvm(.atomic_or_acquire(t))) + case "atomic_or_release": + self = .init(name: .llvm(.atomic_or_release(t))) + case "atomic_or_acqrel": + self = .init(name: .llvm(.atomic_or_acqrel(t))) + case "atomic_or_seqcst": + self = .init(name: .llvm(.atomic_or_seqcst(t))) + case "atomic_xor_relaxed": + self = .init(name: .llvm(.atomic_xor_relaxed(t))) + case "atomic_xor_acquire": + self = .init(name: .llvm(.atomic_xor_acquire(t))) + case "atomic_xor_release": + self = .init(name: .llvm(.atomic_xor_release(t))) + case "atomic_xor_acqrel": + self = .init(name: .llvm(.atomic_xor_acqrel(t))) + case "atomic_xor_seqcst": + self = .init(name: .llvm(.atomic_xor_seqcst(t))) + case "atomic_cmpxchg_relaxed_relaxed": + self = .init(name: .llvm(.atomic_cmpxchg_relaxed_relaxed(t))) + case "atomic_cmpxchg_relaxed_acquire": + self = .init(name: .llvm(.atomic_cmpxchg_relaxed_acquire(t))) + case "atomic_cmpxchg_relaxed_seqcst": + self = .init(name: .llvm(.atomic_cmpxchg_relaxed_seqcst(t))) + case "atomic_cmpxchg_acquire_relaxed": + self = .init(name: .llvm(.atomic_cmpxchg_acquire_relaxed(t))) + case "atomic_cmpxchg_acquire_acquire": + self = .init(name: .llvm(.atomic_cmpxchg_acquire_acquire(t))) + case "atomic_cmpxchg_acquire_seqcst": + self = .init(name: .llvm(.atomic_cmpxchg_acquire_seqcst(t))) + case "atomic_cmpxchg_release_relaxed": + self = .init(name: .llvm(.atomic_cmpxchg_release_relaxed(t))) + case "atomic_cmpxchg_release_acquire": + self = .init(name: .llvm(.atomic_cmpxchg_release_acquire(t))) + case "atomic_cmpxchg_release_seqcst": + self = .init(name: .llvm(.atomic_cmpxchg_release_seqcst(t))) + case "atomic_cmpxchg_acqrel_relaxed": + self = .init(name: .llvm(.atomic_cmpxchg_acqrel_relaxed(t))) + case "atomic_cmpxchg_acqrel_acquire": + self = .init(name: .llvm(.atomic_cmpxchg_acqrel_acquire(t))) + case "atomic_cmpxchg_acqrel_seqcst": + self = .init(name: .llvm(.atomic_cmpxchg_acqrel_seqcst(t))) + case 
"atomic_cmpxchg_seqcst_relaxed": + self = .init(name: .llvm(.atomic_cmpxchg_seqcst_relaxed(t))) + case "atomic_cmpxchg_seqcst_acquire": + self = .init(name: .llvm(.atomic_cmpxchg_seqcst_acquire(t))) + case "atomic_cmpxchg_seqcst_seqcst": + self = .init(name: .llvm(.atomic_cmpxchg_seqcst_seqcst(t))) + case "atomic_cmpxchgweak_relaxed_relaxed": + self = .init(name: .llvm(.atomic_cmpxchgweak_relaxed_relaxed(t))) + case "atomic_cmpxchgweak_relaxed_acquire": + self = .init(name: .llvm(.atomic_cmpxchgweak_relaxed_acquire(t))) + case "atomic_cmpxchgweak_relaxed_seqcst": + self = .init(name: .llvm(.atomic_cmpxchgweak_relaxed_seqcst(t))) + case "atomic_cmpxchgweak_acquire_relaxed": + self = .init(name: .llvm(.atomic_cmpxchgweak_acquire_relaxed(t))) + case "atomic_cmpxchgweak_acquire_acquire": + self = .init(name: .llvm(.atomic_cmpxchgweak_acquire_acquire(t))) + case "atomic_cmpxchgweak_acquire_seqcst": + self = .init(name: .llvm(.atomic_cmpxchgweak_acquire_seqcst(t))) + case "atomic_cmpxchgweak_release_relaxed": + self = .init(name: .llvm(.atomic_cmpxchgweak_release_relaxed(t))) + case "atomic_cmpxchgweak_release_acquire": + self = .init(name: .llvm(.atomic_cmpxchgweak_release_acquire(t))) + case "atomic_cmpxchgweak_release_seqcst": + self = .init(name: .llvm(.atomic_cmpxchgweak_release_seqcst(t))) + case "atomic_cmpxchgweak_acqrel_relaxed": + self = .init(name: .llvm(.atomic_cmpxchgweak_acqrel_relaxed(t))) + case "atomic_cmpxchgweak_acqrel_acquire": + self = .init(name: .llvm(.atomic_cmpxchgweak_acqrel_acquire(t))) + case "atomic_cmpxchgweak_acqrel_seqcst": + self = .init(name: .llvm(.atomic_cmpxchgweak_acqrel_seqcst(t))) + case "atomic_cmpxchgweak_seqcst_relaxed": + self = .init(name: .llvm(.atomic_cmpxchgweak_seqcst_relaxed(t))) + case "atomic_cmpxchgweak_seqcst_acquire": + self = .init(name: .llvm(.atomic_cmpxchgweak_seqcst_acquire(t))) + case "atomic_cmpxchgweak_seqcst_seqcst": + self = .init(name: .llvm(.atomic_cmpxchgweak_seqcst_seqcst(t))) + default: + return nil + 
} } } diff --git a/Sources/FrontEnd/NativeInstruction.swift b/Sources/FrontEnd/NativeInstruction.swift index b269163f6..a2ef5acbf 100644 --- a/Sources/FrontEnd/NativeInstruction.swift +++ b/Sources/FrontEnd/NativeInstruction.swift @@ -130,6 +130,233 @@ public enum NativeInstruction: Hashable { case atomic_load_seqcst(BuiltinType) + case atomic_swap_relaxed(BuiltinType) + + case atomic_swap_acquire(BuiltinType) + + case atomic_swap_release(BuiltinType) + + case atomic_swap_acqrel(BuiltinType) + + case atomic_swap_seqcst(BuiltinType) + + case atomic_add_relaxed(BuiltinType) + + case atomic_add_acquire(BuiltinType) + + case atomic_add_release(BuiltinType) + + case atomic_add_acqrel(BuiltinType) + + case atomic_add_seqcst(BuiltinType) + + case atomic_fadd_relaxed(BuiltinType) + + case atomic_fadd_acquire(BuiltinType) + + case atomic_fadd_release(BuiltinType) + + case atomic_fadd_acqrel(BuiltinType) + + case atomic_fadd_seqcst(BuiltinType) + + case atomic_sub_relaxed(BuiltinType) + + case atomic_sub_acquire(BuiltinType) + + case atomic_sub_release(BuiltinType) + + case atomic_sub_acqrel(BuiltinType) + + case atomic_sub_seqcst(BuiltinType) + + case atomic_fsub_relaxed(BuiltinType) + + case atomic_fsub_acquire(BuiltinType) + + case atomic_fsub_release(BuiltinType) + + case atomic_fsub_acqrel(BuiltinType) + + case atomic_fsub_seqcst(BuiltinType) + + case atomic_max_relaxed(BuiltinType) + + case atomic_max_acquire(BuiltinType) + + case atomic_max_release(BuiltinType) + + case atomic_max_acqrel(BuiltinType) + + case atomic_max_seqcst(BuiltinType) + + case atomic_umax_relaxed(BuiltinType) + + case atomic_umax_acquire(BuiltinType) + + case atomic_umax_release(BuiltinType) + + case atomic_umax_acqrel(BuiltinType) + + case atomic_umax_seqcst(BuiltinType) + + case atomic_fmax_relaxed(BuiltinType) + + case atomic_fmax_acquire(BuiltinType) + + case atomic_fmax_release(BuiltinType) + + case atomic_fmax_acqrel(BuiltinType) + + case atomic_fmax_seqcst(BuiltinType) + + case 
atomic_min_relaxed(BuiltinType) + + case atomic_min_acquire(BuiltinType) + + case atomic_min_release(BuiltinType) + + case atomic_min_acqrel(BuiltinType) + + case atomic_min_seqcst(BuiltinType) + + case atomic_umin_relaxed(BuiltinType) + + case atomic_umin_acquire(BuiltinType) + + case atomic_umin_release(BuiltinType) + + case atomic_umin_acqrel(BuiltinType) + + case atomic_umin_seqcst(BuiltinType) + + case atomic_fmin_relaxed(BuiltinType) + + case atomic_fmin_acquire(BuiltinType) + + case atomic_fmin_release(BuiltinType) + + case atomic_fmin_acqrel(BuiltinType) + + case atomic_fmin_seqcst(BuiltinType) + + case atomic_and_relaxed(BuiltinType) + + case atomic_and_acquire(BuiltinType) + + case atomic_and_release(BuiltinType) + + case atomic_and_acqrel(BuiltinType) + + case atomic_and_seqcst(BuiltinType) + + case atomic_nand_relaxed(BuiltinType) + + case atomic_nand_acquire(BuiltinType) + + case atomic_nand_release(BuiltinType) + + case atomic_nand_acqrel(BuiltinType) + + case atomic_nand_seqcst(BuiltinType) + + case atomic_or_relaxed(BuiltinType) + + case atomic_or_acquire(BuiltinType) + + case atomic_or_release(BuiltinType) + + case atomic_or_acqrel(BuiltinType) + + case atomic_or_seqcst(BuiltinType) + + case atomic_xor_relaxed(BuiltinType) + + case atomic_xor_acquire(BuiltinType) + + case atomic_xor_release(BuiltinType) + + case atomic_xor_acqrel(BuiltinType) + + case atomic_xor_seqcst(BuiltinType) + + case atomic_cmpxchg_relaxed_relaxed(BuiltinType) + + case atomic_cmpxchg_relaxed_acquire(BuiltinType) + + case atomic_cmpxchg_relaxed_seqcst(BuiltinType) + + case atomic_cmpxchg_acquire_relaxed(BuiltinType) + + case atomic_cmpxchg_acquire_acquire(BuiltinType) + + case atomic_cmpxchg_acquire_seqcst(BuiltinType) + + case atomic_cmpxchg_release_relaxed(BuiltinType) + + case atomic_cmpxchg_release_acquire(BuiltinType) + + case atomic_cmpxchg_release_seqcst(BuiltinType) + + case atomic_cmpxchg_acqrel_relaxed(BuiltinType) + + case atomic_cmpxchg_acqrel_acquire(BuiltinType) 
+ + case atomic_cmpxchg_acqrel_seqcst(BuiltinType) + + case atomic_cmpxchg_seqcst_relaxed(BuiltinType) + + case atomic_cmpxchg_seqcst_acquire(BuiltinType) + + case atomic_cmpxchg_seqcst_seqcst(BuiltinType) + + case atomic_cmpxchgweak_relaxed_relaxed(BuiltinType) + + case atomic_cmpxchgweak_relaxed_acquire(BuiltinType) + + case atomic_cmpxchgweak_relaxed_seqcst(BuiltinType) + + case atomic_cmpxchgweak_acquire_relaxed(BuiltinType) + + case atomic_cmpxchgweak_acquire_acquire(BuiltinType) + + case atomic_cmpxchgweak_acquire_seqcst(BuiltinType) + + case atomic_cmpxchgweak_release_relaxed(BuiltinType) + + case atomic_cmpxchgweak_release_acquire(BuiltinType) + + case atomic_cmpxchgweak_release_seqcst(BuiltinType) + + case atomic_cmpxchgweak_acqrel_relaxed(BuiltinType) + + case atomic_cmpxchgweak_acqrel_acquire(BuiltinType) + + case atomic_cmpxchgweak_acqrel_seqcst(BuiltinType) + + case atomic_cmpxchgweak_seqcst_relaxed(BuiltinType) + + case atomic_cmpxchgweak_seqcst_acquire(BuiltinType) + + case atomic_cmpxchgweak_seqcst_seqcst(BuiltinType) + + case atomic_fence_acquire + + case atomic_fence_release + + case atomic_fence_acqrel + + case atomic_fence_seqcst + + case atomic_singlethreadfence_acquire + + case atomic_singlethreadfence_release + + case atomic_singlethreadfence_acqrel + + case atomic_singlethreadfence_seqcst + + /// The parameters of a floating-point LLVM instruction. 
public struct MathFlags: OptionSet, Hashable { @@ -250,18 +477,139 @@ extension NativeInstruction { return .init(to: ^t) case .advancedByBytes(let byteOffset): return .init(.builtin(.ptr), ^byteOffset, to: .builtin(.ptr)) - case .atomic_store_relaxed(let t): - return .init(.builtin(.ptr), ^t, to: .builtin(.ptr)) - case .atomic_store_release(let t): + case .atomic_store_relaxed(let t), + .atomic_store_release(let t), + .atomic_store_seqcst(let t): return .init(.builtin(.ptr), ^t, to: .builtin(.ptr)) - case .atomic_store_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: .builtin(.ptr)) - case .atomic_load_relaxed(let t): - return .init(.builtin(.ptr), to: ^t) - case .atomic_load_acquire(let t): - return .init(.builtin(.ptr), to: ^t) - case .atomic_load_seqcst(let t): + case .atomic_load_relaxed(let t), + .atomic_load_acquire(let t), + .atomic_load_seqcst(let t): return .init(.builtin(.ptr), to: ^t) + case .atomic_swap_relaxed(let t), + .atomic_swap_acquire(let t), + .atomic_swap_release(let t), + .atomic_swap_acqrel(let t), + .atomic_swap_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_add_relaxed(let t), + .atomic_add_acquire(let t), + .atomic_add_release(let t), + .atomic_add_acqrel(let t), + .atomic_add_seqcst(let t), + .atomic_fadd_relaxed(let t), + .atomic_fadd_acquire(let t), + .atomic_fadd_release(let t), + .atomic_fadd_acqrel(let t), + .atomic_fadd_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_sub_relaxed(let t), + .atomic_sub_acquire(let t), + .atomic_sub_release(let t), + .atomic_sub_acqrel(let t), + .atomic_sub_seqcst(let t), + .atomic_fsub_relaxed(let t), + .atomic_fsub_acquire(let t), + .atomic_fsub_release(let t), + .atomic_fsub_acqrel(let t), + .atomic_fsub_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_max_relaxed(let t), + .atomic_max_acquire(let t), + .atomic_max_release(let t), + .atomic_max_acqrel(let t), + .atomic_max_seqcst(let t), + .atomic_umax_relaxed(let t), + 
.atomic_umax_acquire(let t), + .atomic_umax_release(let t), + .atomic_umax_acqrel(let t), + .atomic_umax_seqcst(let t), + .atomic_fmax_relaxed(let t), + .atomic_fmax_acquire(let t), + .atomic_fmax_release(let t), + .atomic_fmax_acqrel(let t), + .atomic_fmax_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_min_relaxed(let t), + .atomic_min_acquire(let t), + .atomic_min_release(let t), + .atomic_min_acqrel(let t), + .atomic_min_seqcst(let t), + .atomic_umin_relaxed(let t), + .atomic_umin_acquire(let t), + .atomic_umin_release(let t), + .atomic_umin_acqrel(let t), + .atomic_umin_seqcst(let t), + .atomic_fmin_relaxed(let t), + .atomic_fmin_acquire(let t), + .atomic_fmin_release(let t), + .atomic_fmin_acqrel(let t), + .atomic_fmin_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_and_relaxed(let t), + .atomic_and_acquire(let t), + .atomic_and_release(let t), + .atomic_and_acqrel(let t), + .atomic_and_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_nand_relaxed(let t), + .atomic_nand_acquire(let t), + .atomic_nand_release(let t), + .atomic_nand_acqrel(let t), + .atomic_nand_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_or_relaxed(let t), + .atomic_or_acquire(let t), + .atomic_or_release(let t), + .atomic_or_acqrel(let t), + .atomic_or_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_xor_relaxed(let t), + .atomic_xor_acquire(let t), + .atomic_xor_release(let t), + .atomic_xor_acqrel(let t), + .atomic_xor_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_cmpxchg_relaxed_relaxed(let t), + .atomic_cmpxchg_relaxed_acquire(let t), + .atomic_cmpxchg_relaxed_seqcst(let t), + .atomic_cmpxchg_acquire_relaxed(let t), + .atomic_cmpxchg_acquire_acquire(let t), + .atomic_cmpxchg_acquire_seqcst(let t), + .atomic_cmpxchg_release_relaxed(let t), + .atomic_cmpxchg_release_acquire(let t), + .atomic_cmpxchg_release_seqcst(let t), + 
.atomic_cmpxchg_acqrel_relaxed(let t), + .atomic_cmpxchg_acqrel_acquire(let t), + .atomic_cmpxchg_acqrel_seqcst(let t), + .atomic_cmpxchg_seqcst_relaxed(let t), + .atomic_cmpxchg_seqcst_acquire(let t), + .atomic_cmpxchg_seqcst_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_relaxed_relaxed(let t), + .atomic_cmpxchgweak_relaxed_acquire(let t), + .atomic_cmpxchgweak_relaxed_seqcst(let t), + .atomic_cmpxchgweak_acquire_relaxed(let t), + .atomic_cmpxchgweak_acquire_acquire(let t), + .atomic_cmpxchgweak_acquire_seqcst(let t), + .atomic_cmpxchgweak_release_relaxed(let t), + .atomic_cmpxchgweak_release_acquire(let t), + .atomic_cmpxchgweak_release_seqcst(let t), + .atomic_cmpxchgweak_acqrel_relaxed(let t), + .atomic_cmpxchgweak_acqrel_acquire(let t), + .atomic_cmpxchgweak_acqrel_seqcst(let t), + .atomic_cmpxchgweak_seqcst_relaxed(let t), + .atomic_cmpxchgweak_seqcst_acquire(let t), + .atomic_cmpxchgweak_seqcst_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_fence_acquire, + .atomic_fence_release, + .atomic_fence_acqrel, + .atomic_fence_seqcst, + .atomic_singlethreadfence_acquire, + .atomic_singlethreadfence_release, + .atomic_singlethreadfence_acqrel, + .atomic_singlethreadfence_seqcst: + return .init(to: .void) } } @@ -367,6 +715,232 @@ extension NativeInstruction: CustomStringConvertible { return "atomic_load_acquire_\(t)" case .atomic_load_seqcst(let t): return "atomic_load_seqcst_\(t)" + case .atomic_swap_relaxed(let t): + return "atomic_swap_relaxed_\(t)" + case .atomic_swap_acquire(let t): + return "atomic_swap_acquire_\(t)" + case .atomic_swap_release(let t): + return "atomic_swap_release_\(t)" + case .atomic_swap_acqrel(let t): + return "atomic_swap_acqrel_\(t)" + case .atomic_swap_seqcst(let t): + return "atomic_swap_seqcst_\(t)" + case .atomic_add_relaxed(let t): + return "atomic_add_relaxed_\(t)" + case 
.atomic_add_acquire(let t): + return "atomic_add_acquire_\(t)" + case .atomic_add_release(let t): + return "atomic_add_release_\(t)" + case .atomic_add_acqrel(let t): + return "atomic_add_acqrel_\(t)" + case .atomic_add_seqcst(let t): + return "atomic_add_seqcst_\(t)" + case .atomic_fadd_relaxed(let t): + return "atomic_fadd_relaxed_\(t)" + case .atomic_fadd_acquire(let t): + return "atomic_fadd_acquire_\(t)" + case .atomic_fadd_release(let t): + return "atomic_fadd_release_\(t)" + case .atomic_fadd_acqrel(let t): + return "atomic_fadd_acqrel_\(t)" + case .atomic_fadd_seqcst(let t): + return "atomic_fadd_seqcst_\(t)" + case .atomic_sub_relaxed(let t): + return "atomic_sub_relaxed_\(t)" + case .atomic_sub_acquire(let t): + return "atomic_sub_acquire_\(t)" + case .atomic_sub_release(let t): + return "atomic_sub_release_\(t)" + case .atomic_sub_acqrel(let t): + return "atomic_sub_acqrel_\(t)" + case .atomic_sub_seqcst(let t): + return "atomic_sub_seqcst_\(t)" + case .atomic_fsub_relaxed(let t): + return "atomic_fsub_relaxed_\(t)" + case .atomic_fsub_acquire(let t): + return "atomic_fsub_acquire_\(t)" + case .atomic_fsub_release(let t): + return "atomic_fsub_release_\(t)" + case .atomic_fsub_acqrel(let t): + return "atomic_fsub_acqrel_\(t)" + case .atomic_fsub_seqcst(let t): + return "atomic_fsub_seqcst_\(t)" + case .atomic_max_relaxed(let t): + return "atomic_max_relaxed_\(t)" + case .atomic_max_acquire(let t): + return "atomic_max_acquire_\(t)" + case .atomic_max_release(let t): + return "atomic_max_release_\(t)" + case .atomic_max_acqrel(let t): + return "atomic_max_acqrel_\(t)" + case .atomic_max_seqcst(let t): + return "atomic_max_seqcst_\(t)" + case .atomic_umax_relaxed(let t): + return "atomic_umax_relaxed_\(t)" + case .atomic_umax_acquire(let t): + return "atomic_umax_acquire_\(t)" + case .atomic_umax_release(let t): + return "atomic_umax_release_\(t)" + case .atomic_umax_acqrel(let t): + return "atomic_umax_acqrel_\(t)" + case .atomic_umax_seqcst(let t): + 
return "atomic_umax_seqcst_\(t)" + case .atomic_fmax_relaxed(let t): + return "atomic_fmax_relaxed_\(t)" + case .atomic_fmax_acquire(let t): + return "atomic_fmax_acquire_\(t)" + case .atomic_fmax_release(let t): + return "atomic_fmax_release_\(t)" + case .atomic_fmax_acqrel(let t): + return "atomic_fmax_acqrel_\(t)" + case .atomic_fmax_seqcst(let t): + return "atomic_fmax_seqcst_\(t)" + case .atomic_min_relaxed(let t): + return "atomic_min_relaxed_\(t)" + case .atomic_min_acquire(let t): + return "atomic_min_acquire_\(t)" + case .atomic_min_release(let t): + return "atomic_min_release_\(t)" + case .atomic_min_acqrel(let t): + return "atomic_min_acqrel_\(t)" + case .atomic_min_seqcst(let t): + return "atomic_min_seqcst_\(t)" + case .atomic_umin_relaxed(let t): + return "atomic_umin_relaxed_\(t)" + case .atomic_umin_acquire(let t): + return "atomic_umin_acquire_\(t)" + case .atomic_umin_release(let t): + return "atomic_umin_release_\(t)" + case .atomic_umin_acqrel(let t): + return "atomic_umin_acqrel_\(t)" + case .atomic_umin_seqcst(let t): + return "atomic_umin_seqcst_\(t)" + case .atomic_fmin_relaxed(let t): + return "atomic_fmin_relaxed_\(t)" + case .atomic_fmin_acquire(let t): + return "atomic_fmin_acquire_\(t)" + case .atomic_fmin_release(let t): + return "atomic_fmin_release_\(t)" + case .atomic_fmin_acqrel(let t): + return "atomic_fmin_acqrel_\(t)" + case .atomic_fmin_seqcst(let t): + return "atomic_fmin_seqcst_\(t)" + case .atomic_and_relaxed(let t): + return "atomic_and_relaxed_\(t)" + case .atomic_and_acquire(let t): + return "atomic_and_acquire_\(t)" + case .atomic_and_release(let t): + return "atomic_and_release_\(t)" + case .atomic_and_acqrel(let t): + return "atomic_and_acqrel_\(t)" + case .atomic_and_seqcst(let t): + return "atomic_and_seqcst_\(t)" + case .atomic_nand_relaxed(let t): + return "atomic_nand_relaxed_\(t)" + case .atomic_nand_acquire(let t): + return "atomic_nand_acquire_\(t)" + case .atomic_nand_release(let t): + return 
"atomic_nand_release_\(t)" + case .atomic_nand_acqrel(let t): + return "atomic_nand_acqrel_\(t)" + case .atomic_nand_seqcst(let t): + return "atomic_nand_seqcst_\(t)" + case .atomic_or_relaxed(let t): + return "atomic_or_relaxed_\(t)" + case .atomic_or_acquire(let t): + return "atomic_or_acquire_\(t)" + case .atomic_or_release(let t): + return "atomic_or_release_\(t)" + case .atomic_or_acqrel(let t): + return "atomic_or_acqrel_\(t)" + case .atomic_or_seqcst(let t): + return "atomic_or_seqcst_\(t)" + case .atomic_xor_relaxed(let t): + return "atomic_xor_relaxed_\(t)" + case .atomic_xor_acquire(let t): + return "atomic_xor_acquire_\(t)" + case .atomic_xor_release(let t): + return "atomic_xor_release_\(t)" + case .atomic_xor_acqrel(let t): + return "atomic_xor_acqrel_\(t)" + case .atomic_xor_seqcst(let t): + return "atomic_xor_seqcst_\(t)" + case .atomic_cmpxchg_relaxed_relaxed(let t): + return "atomic_cmpxchg_relaxed_relaxed_\(t)" + case .atomic_cmpxchg_relaxed_acquire(let t): + return "atomic_cmpxchg_relaxed_acquire_\(t)" + case .atomic_cmpxchg_relaxed_seqcst(let t): + return "atomic_cmpxchg_relaxed_seqcst_\(t)" + case .atomic_cmpxchg_acquire_relaxed(let t): + return "atomic_cmpxchg_acquire_relaxed_\(t)" + case .atomic_cmpxchg_acquire_acquire(let t): + return "atomic_cmpxchg_acquire_acquire_\(t)" + case .atomic_cmpxchg_acquire_seqcst(let t): + return "atomic_cmpxchg_acquire_seqcst_\(t)" + case .atomic_cmpxchg_release_relaxed(let t): + return "atomic_cmpxchg_release_relaxed_\(t)" + case .atomic_cmpxchg_release_acquire(let t): + return "atomic_cmpxchg_release_acquire_\(t)" + case .atomic_cmpxchg_release_seqcst(let t): + return "atomic_cmpxchg_release_seqcst_\(t)" + case .atomic_cmpxchg_acqrel_relaxed(let t): + return "atomic_cmpxchg_acqrel_relaxed_\(t)" + case .atomic_cmpxchg_acqrel_acquire(let t): + return "atomic_cmpxchg_acqrel_acquire_\(t)" + case .atomic_cmpxchg_acqrel_seqcst(let t): + return "atomic_cmpxchg_acqrel_seqcst_\(t)" + case 
.atomic_cmpxchg_seqcst_relaxed(let t): + return "atomic_cmpxchg_seqcst_relaxed_\(t)" + case .atomic_cmpxchg_seqcst_acquire(let t): + return "atomic_cmpxchg_seqcst_acquire_\(t)" + case .atomic_cmpxchg_seqcst_seqcst(let t): + return "atomic_cmpxchg_seqcst_seqcst_\(t)" + case .atomic_cmpxchgweak_relaxed_relaxed(let t): + return "atomic_cmpxchgweak_relaxed_relaxed_\(t)" + case .atomic_cmpxchgweak_relaxed_acquire(let t): + return "atomic_cmpxchgweak_relaxed_acquire_\(t)" + case .atomic_cmpxchgweak_relaxed_seqcst(let t): + return "atomic_cmpxchgweak_relaxed_seqcst_\(t)" + case .atomic_cmpxchgweak_acquire_relaxed(let t): + return "atomic_cmpxchgweak_acquire_relaxed_\(t)" + case .atomic_cmpxchgweak_acquire_acquire(let t): + return "atomic_cmpxchgweak_acquire_acquire_\(t)" + case .atomic_cmpxchgweak_acquire_seqcst(let t): + return "atomic_cmpxchgweak_acquire_seqcst_\(t)" + case .atomic_cmpxchgweak_release_relaxed(let t): + return "atomic_cmpxchgweak_release_relaxed_\(t)" + case .atomic_cmpxchgweak_release_acquire(let t): + return "atomic_cmpxchgweak_release_acquire_\(t)" + case .atomic_cmpxchgweak_release_seqcst(let t): + return "atomic_cmpxchgweak_release_seqcst_\(t)" + case .atomic_cmpxchgweak_acqrel_relaxed(let t): + return "atomic_cmpxchgweak_acqrel_relaxed_\(t)" + case .atomic_cmpxchgweak_acqrel_acquire(let t): + return "atomic_cmpxchgweak_acqrel_acquire_\(t)" + case .atomic_cmpxchgweak_acqrel_seqcst(let t): + return "atomic_cmpxchgweak_acqrel_seqcst_\(t)" + case .atomic_cmpxchgweak_seqcst_relaxed(let t): + return "atomic_cmpxchgweak_seqcst_relaxed_\(t)" + case .atomic_cmpxchgweak_seqcst_acquire(let t): + return "atomic_cmpxchgweak_seqcst_acquire_\(t)" + case .atomic_cmpxchgweak_seqcst_seqcst(let t): + return "atomic_cmpxchgweak_seqcst_seqcst_\(t)" + case .atomic_fence_acquire: + return "atomic_fence_acquire" + case .atomic_fence_release: + return "atomic_fence_release" + case .atomic_fence_acqrel: + return "atomic_fence_acqrel" + case .atomic_fence_seqcst: + return 
"atomic_fence_seqcst" + case .atomic_singlethreadfence_acquire: + return "atomic_singlethreadfence_acquire" + case .atomic_singlethreadfence_release: + return "atomic_singlethreadfence_release" + case .atomic_singlethreadfence_acqrel: + return "atomic_singlethreadfence_acqrel" + case .atomic_singlethreadfence_seqcst: + return "atomic_singlethreadfence_seqcst" } } diff --git a/Tests/HyloTests/BuiltinFunctionTests.swift b/Tests/HyloTests/BuiltinFunctionTests.swift index 0c3b95e5d..433ad5cf6 100644 --- a/Tests/HyloTests/BuiltinFunctionTests.swift +++ b/Tests/HyloTests/BuiltinFunctionTests.swift @@ -230,6 +230,110 @@ final class BuiltinFunctionTests: XCTestCase { createInstanceWithType: expectedType) } + func testAtomicRMWForSignedIntegers() throws { + let expectedType = ArrowType(.builtin(.ptr), .builtin(.i(64)), to: .builtin(.i(64))) + try assertParse( + instructions: [ + "atomic_swap", + "atomic_add", + "atomic_sub", + "atomic_max", + "atomic_min", + "atomic_and", + "atomic_nand", + "atomic_or", + "atomic_xor" + ], + parameterizedBy: [ + ["relaxed", "i64"], + ["acquire", "i64"], + ["release", "i64"], + ["acqrel", "i64"], + ["seqcst", "i64"] + ], + createInstanceWithType: expectedType) + } + + func testAtomicRMWForUnsignedIntegers() throws { + let expectedType = ArrowType(.builtin(.ptr), .builtin(.i(64)), to: .builtin(.i(64))) + try assertParse( + instructions: [ + "atomic_umax", + "atomic_umin" + ], + parameterizedBy: [ + ["relaxed", "i64"], + ["acquire", "i64"], + ["release", "i64"], + ["acqrel", "i64"], + ["seqcst", "i64"] + ], + createInstanceWithType: expectedType) + } + + func testAtomicRMWForFloatingNumbers() throws { + let expectedType = ArrowType(.builtin(.ptr), .builtin(.float64), to: .builtin(.float64)) + try assertParse( + instructions: [ + "atomic_swap", + "atomic_fadd", + "atomic_fsub", + "atomic_fmax", + "atomic_fmin" + ], + parameterizedBy: [ + ["relaxed", "float64"], + ["acquire", "float64"], + ["release", "float64"], + ["acqrel", "float64"], + 
["seqcst", "float64"] + ], + createInstanceWithType: expectedType) + } + + func testAtomicCompareExchange() throws { + let expectedType = ArrowType(.builtin(.ptr), .builtin(.i(64)), .builtin(.i(64)), to: ^TupleType(types: [.builtin(.i(64)), .builtin(.i(1))])) + try assertParse( + instructions: [ + "atomic_cmpxchg", + "atomic_cmpxchgweak" + ], + parameterizedBy: [ + ["relaxed", "relaxed", "i64"], + ["relaxed", "acquire", "i64"], + ["relaxed", "seqcst", "i64"], + ["acquire", "relaxed", "i64"], + ["acquire", "acquire", "i64"], + ["acquire", "seqcst", "i64"], + ["release", "relaxed", "i64"], + ["release", "acquire", "i64"], + ["release", "seqcst", "i64"], + ["acqrel", "relaxed", "i64"], + ["acqrel", "acquire", "i64"], + ["acqrel", "seqcst", "i64"], + ["seqcst", "relaxed", "i64"], + ["seqcst", "acquire", "i64"], + ["seqcst", "seqcst", "i64"] + ], + createInstanceWithType: expectedType) + } + + func testAtomicFence() throws { + let expectedType = ArrowType(to: .void) + try assertParse( + instructions: [ + "atomic_fence", + "atomic_singlethreadfence" + ], + parameterizedBy: [ + ["acquire"], + ["release"], + ["acqrel"], + ["seqcst"] + ], + createInstanceWithType: expectedType) + } + /// For each element in `instructions` and `parameters`, assert that parsing a built-in functions /// named after their concatenation creates an instance with the same stem and parameters, and /// whose type is `expectedType`. 
From 81e2b04a54ff5521e9980277f1a063285d17b4f1 Mon Sep 17 00:00:00 2001 From: Lucian Radu Teodorescu Date: Sun, 8 Dec 2024 23:25:18 +0200 Subject: [PATCH 07/12] Atomic stores return void --- Sources/CodeGen/LLVM/Transpilation.swift | 6 +++--- Sources/FrontEnd/NativeInstruction.swift | 2 +- Tests/HyloTests/BuiltinFunctionTests.swift | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Sources/CodeGen/LLVM/Transpilation.swift b/Sources/CodeGen/LLVM/Transpilation.swift index 19b594d1d..8f8a93072 100644 --- a/Sources/CodeGen/LLVM/Transpilation.swift +++ b/Sources/CodeGen/LLVM/Transpilation.swift @@ -1149,21 +1149,21 @@ extension SwiftyLLVM.Module { let value = llvm(s.operands[1]) let s = insertStore(value, to: target, at: insertionPoint) setOrdering(.monotonic, for: s) - register[.register(i)] = target + register[.register(i)] = ptr.null case .atomic_store_release: let target = llvm(s.operands[0]) let value = llvm(s.operands[1]) let s = insertStore(value, to: target, at: insertionPoint) setOrdering(.release, for: s) - register[.register(i)] = target + register[.register(i)] = ptr.null case .atomic_store_seqcst: let target = llvm(s.operands[0]) let value = llvm(s.operands[1]) let s = insertStore(value, to: target, at: insertionPoint) setOrdering(.sequentiallyConsistent, for: s) - register[.register(i)] = target + register[.register(i)] = ptr.null case .atomic_swap_relaxed: insert(atomicRMW: .xchg, ordering: .monotonic, for: i) diff --git a/Sources/FrontEnd/NativeInstruction.swift b/Sources/FrontEnd/NativeInstruction.swift index a2ef5acbf..2fe5c8967 100644 --- a/Sources/FrontEnd/NativeInstruction.swift +++ b/Sources/FrontEnd/NativeInstruction.swift @@ -480,7 +480,7 @@ extension NativeInstruction { case .atomic_store_relaxed(let t), .atomic_store_release(let t), .atomic_store_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: .builtin(.ptr)) + return .init(.builtin(.ptr), ^t, to: .void) case .atomic_load_relaxed(let t), .atomic_load_acquire(let t), 
.atomic_load_seqcst(let t): diff --git a/Tests/HyloTests/BuiltinFunctionTests.swift b/Tests/HyloTests/BuiltinFunctionTests.swift index 433ad5cf6..dbce4a9ce 100644 --- a/Tests/HyloTests/BuiltinFunctionTests.swift +++ b/Tests/HyloTests/BuiltinFunctionTests.swift @@ -219,7 +219,7 @@ final class BuiltinFunctionTests: XCTestCase { } func testAtomicStore() throws { - let expectedType = ArrowType(.builtin(.ptr), .builtin(.i(64)), to: .builtin(.ptr)) + let expectedType = ArrowType(.builtin(.ptr), .builtin(.i(64)), to: .void) try assertParse( instructions: ["atomic_store"], parameterizedBy: [ From 9c2ef4f051658df109558b48e391f43e9ea3e1b3 Mon Sep 17 00:00:00 2001 From: Lucian Radu Teodorescu Date: Tue, 17 Dec 2024 11:40:13 +0200 Subject: [PATCH 08/12] Fix crash at the end of an atomic store. --- Sources/CodeGen/LLVM/Transpilation.swift | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Sources/CodeGen/LLVM/Transpilation.swift b/Sources/CodeGen/LLVM/Transpilation.swift index 8f8a93072..c11f85b60 100644 --- a/Sources/CodeGen/LLVM/Transpilation.swift +++ b/Sources/CodeGen/LLVM/Transpilation.swift @@ -1149,21 +1149,18 @@ extension SwiftyLLVM.Module { let value = llvm(s.operands[1]) let s = insertStore(value, to: target, at: insertionPoint) setOrdering(.monotonic, for: s) - register[.register(i)] = ptr.null case .atomic_store_release: let target = llvm(s.operands[0]) let value = llvm(s.operands[1]) let s = insertStore(value, to: target, at: insertionPoint) setOrdering(.release, for: s) - register[.register(i)] = ptr.null case .atomic_store_seqcst: let target = llvm(s.operands[0]) let value = llvm(s.operands[1]) let s = insertStore(value, to: target, at: insertionPoint) setOrdering(.sequentiallyConsistent, for: s) - register[.register(i)] = ptr.null case .atomic_swap_relaxed: insert(atomicRMW: .xchg, ordering: .monotonic, for: i) @@ -1716,7 +1713,7 @@ extension SwiftyLLVM.Module { if case .constant(let c) = o { return transpiledConstant(c, in: &context) } else { 
- return register[o]! + return register[o] ?? transpiledConstant(VoidConstant(), in: &context) } } From 571967b839114aaf7b97de879fe6ca3bde42255f Mon Sep 17 00:00:00 2001 From: Lucian Radu Teodorescu Date: Fri, 20 Dec 2024 12:12:57 +0200 Subject: [PATCH 09/12] Update `Package.resolved` --- Package.resolved | 38 +++++++++++++++----------------------- 1 file changed, 15 insertions(+), 23 deletions(-) diff --git a/Package.resolved b/Package.resolved index 2fb7c43d7..3baabb6a8 100644 --- a/Package.resolved +++ b/Package.resolved @@ -1,12 +1,13 @@ { + "originHash" : "95ff483b2e7dc485aa5e3a5765c4e25dd5ec98b639cdd3ff580e9f41eb484dbb", "pins" : [ { "identity" : "bigint", "kind" : "remoteSourceControl", "location" : "https://github.com/attaswift/BigInt.git", "state" : { - "revision" : "0ed110f7555c34ff468e72e1686e59721f2b0da6", - "version" : "5.3.0" + "revision" : "114343a705df4725dfe7ab8a2a326b8883cfd79c", + "version" : "5.5.1" } }, { @@ -14,8 +15,8 @@ "kind" : "remoteSourceControl", "location" : "https://github.com/hylo-lang/Durian.git", "state" : { - "revision" : "3d802c00e9063b8bba8ba3fff5691e82df7949e1", - "version" : "1.2.0" + "revision" : "16e6096fab2f87fdd29e3c0968951e55f64ff79e", + "version" : "1.2.1" } }, { @@ -41,8 +42,8 @@ "kind" : "remoteSourceControl", "location" : "https://github.com/apple/swift-argument-parser.git", "state" : { - "revision" : "fee6933f37fde9a5e12a1e4aeaa93fe60116ff2a", - "version" : "1.2.2" + "revision" : "41982a3656a71c768319979febd796c6fd111d5c", + "version" : "1.5.0" } }, { @@ -50,8 +51,8 @@ "kind" : "remoteSourceControl", "location" : "https://github.com/apple/swift-collections.git", "state" : { - "revision" : "937e904258d22af6e447a0b72c0bc67583ef64a2", - "version" : "1.0.4" + "revision" : "671108c96644956dddcd89dd59c203dcdb36cec7", + "version" : "1.1.4" } }, { @@ -81,22 +82,13 @@ "version" : "508.0.1" } }, - { - "identity" : "swift-system", - "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-system.git", - 
"state" : { - "revision" : "836bc4557b74fe6d2660218d56e3ce96aff76574", - "version" : "1.1.1" - } - }, { "identity" : "swift-tools-support-core", "kind" : "remoteSourceControl", "location" : "https://github.com/apple/swift-tools-support-core.git", "state" : { - "revision" : "93784c59434dbca8e8a9e4b700d0d6d94551da6a", - "version" : "0.5.2" + "revision" : "b464fcd8d884e599e3202d9bd1eee29a9e504069", + "version" : "0.7.2" } }, { @@ -105,7 +97,7 @@ "location" : "https://github.com/hylo-lang/Swifty-LLVM", "state" : { "branch" : "main", - "revision" : "6454786c4550b2627f59d01e2e28766283291932" + "revision" : "62cf26ece651a4aba4b6ba9dbbd29c7e69030501" } }, { @@ -113,10 +105,10 @@ "kind" : "remoteSourceControl", "location" : "https://github.com/jpsim/Yams.git", "state" : { - "revision" : "0d9ee7ea8c4ebd4a489ad7a73d5c6cad55d6fed3", - "version" : "5.0.6" + "revision" : "3036ba9d69cf1fd04d433527bc339dc0dc75433d", + "version" : "5.1.3" } } ], - "version" : 2 + "version" : 3 } From 73e69273f8cd7e0abc3289ee1223020ed42c95e6 Mon Sep 17 00:00:00 2001 From: Dimi Racordon Date: Mon, 23 Dec 2024 14:14:16 +0100 Subject: [PATCH 10/12] Fix typo in comments --- Sources/FrontEnd/BuiltinFunction.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Sources/FrontEnd/BuiltinFunction.swift b/Sources/FrontEnd/BuiltinFunction.swift index b5d7aa5d9..f067726f9 100644 --- a/Sources/FrontEnd/BuiltinFunction.swift +++ b/Sources/FrontEnd/BuiltinFunction.swift @@ -81,7 +81,7 @@ extension BuiltinFunction { } } - /// Creates a built-in function representing the native instruction nameed `n` or returns `nil` + /// Creates a built-in function representing the native instruction named `n` or returns `nil` /// if `n` isn't a valid native instruction name. private init?(native n: String) { var tokens = n.split(separator: "_")[...] 
From bfac440cd5df504a0df7b9f1d7f08c39f4f77a52 Mon Sep 17 00:00:00 2001 From: Dimi Racordon Date: Mon, 23 Dec 2024 14:15:30 +0100 Subject: [PATCH 11/12] Refactor the parsing of built-in atomic instructions --- Sources/FrontEnd/BuiltinFunction.swift | 522 ++++++++++++------------- 1 file changed, 261 insertions(+), 261 deletions(-) diff --git a/Sources/FrontEnd/BuiltinFunction.swift b/Sources/FrontEnd/BuiltinFunction.swift index f067726f9..7184ef072 100644 --- a/Sources/FrontEnd/BuiltinFunction.swift +++ b/Sources/FrontEnd/BuiltinFunction.swift @@ -259,268 +259,274 @@ extension BuiltinFunction { self = .init(name: .llvm(.zeroinitializer(t))) case "atomic": - self.init(atomic: n) + if let t = tokens.first, t.contains("fence") { + self.init(fence: n) + } else { + self.init(atomic: n) + } default: return nil } } - /// Creates an atomic built-in function named `n` or returns `nil` if `n` isn't a valid atomic builtin name. - private init?(atomic n: String) { - // Special case for fence instructions; we don't have a type for them. 
- if n.contains("fence") { - switch n { - case "atomic_fence_acquire": - self = .init(name: .llvm(.atomic_fence_acquire)) - case "atomic_fence_release": - self = .init(name: .llvm(.atomic_fence_release)) - case "atomic_fence_acqrel": - self = .init(name: .llvm(.atomic_fence_acqrel)) - case "atomic_fence_seqcst": - self = .init(name: .llvm(.atomic_fence_seqcst)) - case "atomic_singlethreadfence_acquire": - self = .init(name: .llvm(.atomic_singlethreadfence_acquire)) - case "atomic_singlethreadfence_release": - self = .init(name: .llvm(.atomic_singlethreadfence_release)) - case "atomic_singlethreadfence_acqrel": - self = .init(name: .llvm(.atomic_singlethreadfence_acqrel)) - case "atomic_singlethreadfence_seqcst": - self = .init(name: .llvm(.atomic_singlethreadfence_seqcst)) - default: - return nil - } + /// Creates a built-in function representing the native fence instruction named `n` or returns + /// `nil` if `n` isn't a valid native fence instruction name. + private init?(fence n: String) { + switch n { + case "atomic_fence_acquire": + self = .init(name: .llvm(.atomic_fence_acquire)) + case "atomic_fence_release": + self = .init(name: .llvm(.atomic_fence_release)) + case "atomic_fence_acqrel": + self = .init(name: .llvm(.atomic_fence_acqrel)) + case "atomic_fence_seqcst": + self = .init(name: .llvm(.atomic_fence_seqcst)) + case "atomic_singlethreadfence_acquire": + self = .init(name: .llvm(.atomic_singlethreadfence_acquire)) + case "atomic_singlethreadfence_release": + self = .init(name: .llvm(.atomic_singlethreadfence_release)) + case "atomic_singlethreadfence_acqrel": + self = .init(name: .llvm(.atomic_singlethreadfence_acqrel)) + case "atomic_singlethreadfence_seqcst": + self = .init(name: .llvm(.atomic_singlethreadfence_seqcst)) + default: + return nil } - else { - // For the rest of the atomics we have a type at the end. 
- guard let (fs, ts) = splitLastUnderscore(n) else { return nil } - guard let t = BuiltinType.init(ts) else { return nil } - switch fs { - case "atomic_store_relaxed": - self = .init(name: .llvm(.atomic_store_relaxed(t))) - case "atomic_store_release": - self = .init(name: .llvm(.atomic_store_release(t))) - case "atomic_store_seqcst": - self = .init(name: .llvm(.atomic_store_seqcst(t))) - case "atomic_load_relaxed": - self = .init(name: .llvm(.atomic_load_relaxed(t))) - case "atomic_load_acquire": - self = .init(name: .llvm(.atomic_load_acquire(t))) - case "atomic_load_seqcst": - self = .init(name: .llvm(.atomic_load_seqcst(t))) - case "atomic_swap_relaxed": - self = .init(name: .llvm(.atomic_swap_relaxed(t))) - case "atomic_swap_acquire": - self = .init(name: .llvm(.atomic_swap_acquire(t))) - case "atomic_swap_release": - self = .init(name: .llvm(.atomic_swap_release(t))) - case "atomic_swap_acqrel": - self = .init(name: .llvm(.atomic_swap_acqrel(t))) - case "atomic_swap_seqcst": - self = .init(name: .llvm(.atomic_swap_seqcst(t))) - case "atomic_add_relaxed": - self = .init(name: .llvm(.atomic_add_relaxed(t))) - case "atomic_add_acquire": - self = .init(name: .llvm(.atomic_add_acquire(t))) - case "atomic_add_release": - self = .init(name: .llvm(.atomic_add_release(t))) - case "atomic_add_acqrel": - self = .init(name: .llvm(.atomic_add_acqrel(t))) - case "atomic_add_seqcst": - self = .init(name: .llvm(.atomic_add_seqcst(t))) - case "atomic_fadd_relaxed": - self = .init(name: .llvm(.atomic_fadd_relaxed(t))) - case "atomic_fadd_acquire": - self = .init(name: .llvm(.atomic_fadd_acquire(t))) - case "atomic_fadd_release": - self = .init(name: .llvm(.atomic_fadd_release(t))) - case "atomic_fadd_acqrel": - self = .init(name: .llvm(.atomic_fadd_acqrel(t))) - case "atomic_fadd_seqcst": - self = .init(name: .llvm(.atomic_fadd_seqcst(t))) - case "atomic_sub_relaxed": - self = .init(name: .llvm(.atomic_sub_relaxed(t))) - case "atomic_sub_acquire": - self = .init(name: 
.llvm(.atomic_sub_acquire(t))) - case "atomic_sub_release": - self = .init(name: .llvm(.atomic_sub_release(t))) - case "atomic_sub_acqrel": - self = .init(name: .llvm(.atomic_sub_acqrel(t))) - case "atomic_sub_seqcst": - self = .init(name: .llvm(.atomic_sub_seqcst(t))) - case "atomic_fsub_relaxed": - self = .init(name: .llvm(.atomic_fsub_relaxed(t))) - case "atomic_fsub_acquire": - self = .init(name: .llvm(.atomic_fsub_acquire(t))) - case "atomic_fsub_release": - self = .init(name: .llvm(.atomic_fsub_release(t))) - case "atomic_fsub_acqrel": - self = .init(name: .llvm(.atomic_fsub_acqrel(t))) - case "atomic_fsub_seqcst": - self = .init(name: .llvm(.atomic_fsub_seqcst(t))) - case "atomic_max_relaxed": - self = .init(name: .llvm(.atomic_max_relaxed(t))) - case "atomic_max_acquire": - self = .init(name: .llvm(.atomic_max_acquire(t))) - case "atomic_max_release": - self = .init(name: .llvm(.atomic_max_release(t))) - case "atomic_max_acqrel": - self = .init(name: .llvm(.atomic_max_acqrel(t))) - case "atomic_max_seqcst": - self = .init(name: .llvm(.atomic_max_seqcst(t))) - case "atomic_umax_relaxed": - self = .init(name: .llvm(.atomic_umax_relaxed(t))) - case "atomic_umax_acquire": - self = .init(name: .llvm(.atomic_umax_acquire(t))) - case "atomic_umax_release": - self = .init(name: .llvm(.atomic_umax_release(t))) - case "atomic_umax_acqrel": - self = .init(name: .llvm(.atomic_umax_acqrel(t))) - case "atomic_umax_seqcst": - self = .init(name: .llvm(.atomic_umax_seqcst(t))) - case "atomic_fmax_relaxed": - self = .init(name: .llvm(.atomic_fmax_relaxed(t))) - case "atomic_fmax_acquire": - self = .init(name: .llvm(.atomic_fmax_acquire(t))) - case "atomic_fmax_release": - self = .init(name: .llvm(.atomic_fmax_release(t))) - case "atomic_fmax_acqrel": - self = .init(name: .llvm(.atomic_fmax_acqrel(t))) - case "atomic_fmax_seqcst": - self = .init(name: .llvm(.atomic_fmax_seqcst(t))) - case "atomic_min_relaxed": - self = .init(name: .llvm(.atomic_min_relaxed(t))) - case 
"atomic_min_acquire": - self = .init(name: .llvm(.atomic_min_acquire(t))) - case "atomic_min_release": - self = .init(name: .llvm(.atomic_min_release(t))) - case "atomic_min_acqrel": - self = .init(name: .llvm(.atomic_min_acqrel(t))) - case "atomic_min_seqcst": - self = .init(name: .llvm(.atomic_min_seqcst(t))) - case "atomic_umin_relaxed": - self = .init(name: .llvm(.atomic_umin_relaxed(t))) - case "atomic_umin_acquire": - self = .init(name: .llvm(.atomic_umin_acquire(t))) - case "atomic_umin_release": - self = .init(name: .llvm(.atomic_umin_release(t))) - case "atomic_umin_acqrel": - self = .init(name: .llvm(.atomic_umin_acqrel(t))) - case "atomic_umin_seqcst": - self = .init(name: .llvm(.atomic_umin_seqcst(t))) - case "atomic_fmin_relaxed": - self = .init(name: .llvm(.atomic_fmin_relaxed(t))) - case "atomic_fmin_acquire": - self = .init(name: .llvm(.atomic_fmin_acquire(t))) - case "atomic_fmin_release": - self = .init(name: .llvm(.atomic_fmin_release(t))) - case "atomic_fmin_acqrel": - self = .init(name: .llvm(.atomic_fmin_acqrel(t))) - case "atomic_fmin_seqcst": - self = .init(name: .llvm(.atomic_fmin_seqcst(t))) - case "atomic_and_relaxed": - self = .init(name: .llvm(.atomic_and_relaxed(t))) - case "atomic_and_acquire": - self = .init(name: .llvm(.atomic_and_acquire(t))) - case "atomic_and_release": - self = .init(name: .llvm(.atomic_and_release(t))) - case "atomic_and_acqrel": - self = .init(name: .llvm(.atomic_and_acqrel(t))) - case "atomic_and_seqcst": - self = .init(name: .llvm(.atomic_and_seqcst(t))) - case "atomic_nand_relaxed": - self = .init(name: .llvm(.atomic_nand_relaxed(t))) - case "atomic_nand_acquire": - self = .init(name: .llvm(.atomic_nand_acquire(t))) - case "atomic_nand_release": - self = .init(name: .llvm(.atomic_nand_release(t))) - case "atomic_nand_acqrel": - self = .init(name: .llvm(.atomic_nand_acqrel(t))) - case "atomic_nand_seqcst": - self = .init(name: .llvm(.atomic_nand_seqcst(t))) - case "atomic_or_relaxed": - self = .init(name: 
.llvm(.atomic_or_relaxed(t))) - case "atomic_or_acquire": - self = .init(name: .llvm(.atomic_or_acquire(t))) - case "atomic_or_release": - self = .init(name: .llvm(.atomic_or_release(t))) - case "atomic_or_acqrel": - self = .init(name: .llvm(.atomic_or_acqrel(t))) - case "atomic_or_seqcst": - self = .init(name: .llvm(.atomic_or_seqcst(t))) - case "atomic_xor_relaxed": - self = .init(name: .llvm(.atomic_xor_relaxed(t))) - case "atomic_xor_acquire": - self = .init(name: .llvm(.atomic_xor_acquire(t))) - case "atomic_xor_release": - self = .init(name: .llvm(.atomic_xor_release(t))) - case "atomic_xor_acqrel": - self = .init(name: .llvm(.atomic_xor_acqrel(t))) - case "atomic_xor_seqcst": - self = .init(name: .llvm(.atomic_xor_seqcst(t))) - case "atomic_cmpxchg_relaxed_relaxed": - self = .init(name: .llvm(.atomic_cmpxchg_relaxed_relaxed(t))) - case "atomic_cmpxchg_relaxed_acquire": - self = .init(name: .llvm(.atomic_cmpxchg_relaxed_acquire(t))) - case "atomic_cmpxchg_relaxed_seqcst": - self = .init(name: .llvm(.atomic_cmpxchg_relaxed_seqcst(t))) - case "atomic_cmpxchg_acquire_relaxed": - self = .init(name: .llvm(.atomic_cmpxchg_acquire_relaxed(t))) - case "atomic_cmpxchg_acquire_acquire": - self = .init(name: .llvm(.atomic_cmpxchg_acquire_acquire(t))) - case "atomic_cmpxchg_acquire_seqcst": - self = .init(name: .llvm(.atomic_cmpxchg_acquire_seqcst(t))) - case "atomic_cmpxchg_release_relaxed": - self = .init(name: .llvm(.atomic_cmpxchg_release_relaxed(t))) - case "atomic_cmpxchg_release_acquire": - self = .init(name: .llvm(.atomic_cmpxchg_release_acquire(t))) - case "atomic_cmpxchg_release_seqcst": - self = .init(name: .llvm(.atomic_cmpxchg_release_seqcst(t))) - case "atomic_cmpxchg_acqrel_relaxed": - self = .init(name: .llvm(.atomic_cmpxchg_acqrel_relaxed(t))) - case "atomic_cmpxchg_acqrel_acquire": - self = .init(name: .llvm(.atomic_cmpxchg_acqrel_acquire(t))) - case "atomic_cmpxchg_acqrel_seqcst": - self = .init(name: .llvm(.atomic_cmpxchg_acqrel_seqcst(t))) - case 
"atomic_cmpxchg_seqcst_relaxed": - self = .init(name: .llvm(.atomic_cmpxchg_seqcst_relaxed(t))) - case "atomic_cmpxchg_seqcst_acquire": - self = .init(name: .llvm(.atomic_cmpxchg_seqcst_acquire(t))) - case "atomic_cmpxchg_seqcst_seqcst": - self = .init(name: .llvm(.atomic_cmpxchg_seqcst_seqcst(t))) - case "atomic_cmpxchgweak_relaxed_relaxed": - self = .init(name: .llvm(.atomic_cmpxchgweak_relaxed_relaxed(t))) - case "atomic_cmpxchgweak_relaxed_acquire": - self = .init(name: .llvm(.atomic_cmpxchgweak_relaxed_acquire(t))) - case "atomic_cmpxchgweak_relaxed_seqcst": - self = .init(name: .llvm(.atomic_cmpxchgweak_relaxed_seqcst(t))) - case "atomic_cmpxchgweak_acquire_relaxed": - self = .init(name: .llvm(.atomic_cmpxchgweak_acquire_relaxed(t))) - case "atomic_cmpxchgweak_acquire_acquire": - self = .init(name: .llvm(.atomic_cmpxchgweak_acquire_acquire(t))) - case "atomic_cmpxchgweak_acquire_seqcst": - self = .init(name: .llvm(.atomic_cmpxchgweak_acquire_seqcst(t))) - case "atomic_cmpxchgweak_release_relaxed": - self = .init(name: .llvm(.atomic_cmpxchgweak_release_relaxed(t))) - case "atomic_cmpxchgweak_release_acquire": - self = .init(name: .llvm(.atomic_cmpxchgweak_release_acquire(t))) - case "atomic_cmpxchgweak_release_seqcst": - self = .init(name: .llvm(.atomic_cmpxchgweak_release_seqcst(t))) - case "atomic_cmpxchgweak_acqrel_relaxed": - self = .init(name: .llvm(.atomic_cmpxchgweak_acqrel_relaxed(t))) - case "atomic_cmpxchgweak_acqrel_acquire": - self = .init(name: .llvm(.atomic_cmpxchgweak_acqrel_acquire(t))) - case "atomic_cmpxchgweak_acqrel_seqcst": - self = .init(name: .llvm(.atomic_cmpxchgweak_acqrel_seqcst(t))) - case "atomic_cmpxchgweak_seqcst_relaxed": - self = .init(name: .llvm(.atomic_cmpxchgweak_seqcst_relaxed(t))) - case "atomic_cmpxchgweak_seqcst_acquire": - self = .init(name: .llvm(.atomic_cmpxchgweak_seqcst_acquire(t))) - case "atomic_cmpxchgweak_seqcst_seqcst": - self = .init(name: .llvm(.atomic_cmpxchgweak_seqcst_seqcst(t))) - default: - return nil - 
} + } + + /// Creates a built-in function representing the native atomic instruction named `n` or returns + /// `nil` if `n` isn't a valid native atomic instruction name. + private init?(atomic n: String) { + // The type of all atomics (except fences) is mentioned at the end. + let m = n.split(atLastIndexOf: "_") + guard let t = BuiltinType(m.tail.dropFirst()) else { return nil } + + switch m.head { + case "atomic_store_relaxed": + self = .init(name: .llvm(.atomic_store_relaxed(t))) + case "atomic_store_release": + self = .init(name: .llvm(.atomic_store_release(t))) + case "atomic_store_seqcst": + self = .init(name: .llvm(.atomic_store_seqcst(t))) + case "atomic_load_relaxed": + self = .init(name: .llvm(.atomic_load_relaxed(t))) + case "atomic_load_acquire": + self = .init(name: .llvm(.atomic_load_acquire(t))) + case "atomic_load_seqcst": + self = .init(name: .llvm(.atomic_load_seqcst(t))) + case "atomic_swap_relaxed": + self = .init(name: .llvm(.atomic_swap_relaxed(t))) + case "atomic_swap_acquire": + self = .init(name: .llvm(.atomic_swap_acquire(t))) + case "atomic_swap_release": + self = .init(name: .llvm(.atomic_swap_release(t))) + case "atomic_swap_acqrel": + self = .init(name: .llvm(.atomic_swap_acqrel(t))) + case "atomic_swap_seqcst": + self = .init(name: .llvm(.atomic_swap_seqcst(t))) + case "atomic_add_relaxed": + self = .init(name: .llvm(.atomic_add_relaxed(t))) + case "atomic_add_acquire": + self = .init(name: .llvm(.atomic_add_acquire(t))) + case "atomic_add_release": + self = .init(name: .llvm(.atomic_add_release(t))) + case "atomic_add_acqrel": + self = .init(name: .llvm(.atomic_add_acqrel(t))) + case "atomic_add_seqcst": + self = .init(name: .llvm(.atomic_add_seqcst(t))) + case "atomic_fadd_relaxed": + self = .init(name: .llvm(.atomic_fadd_relaxed(t))) + case "atomic_fadd_acquire": + self = .init(name: .llvm(.atomic_fadd_acquire(t))) + case "atomic_fadd_release": + self = .init(name: .llvm(.atomic_fadd_release(t))) + case "atomic_fadd_acqrel": + self 
= .init(name: .llvm(.atomic_fadd_acqrel(t))) + case "atomic_fadd_seqcst": + self = .init(name: .llvm(.atomic_fadd_seqcst(t))) + case "atomic_sub_relaxed": + self = .init(name: .llvm(.atomic_sub_relaxed(t))) + case "atomic_sub_acquire": + self = .init(name: .llvm(.atomic_sub_acquire(t))) + case "atomic_sub_release": + self = .init(name: .llvm(.atomic_sub_release(t))) + case "atomic_sub_acqrel": + self = .init(name: .llvm(.atomic_sub_acqrel(t))) + case "atomic_sub_seqcst": + self = .init(name: .llvm(.atomic_sub_seqcst(t))) + case "atomic_fsub_relaxed": + self = .init(name: .llvm(.atomic_fsub_relaxed(t))) + case "atomic_fsub_acquire": + self = .init(name: .llvm(.atomic_fsub_acquire(t))) + case "atomic_fsub_release": + self = .init(name: .llvm(.atomic_fsub_release(t))) + case "atomic_fsub_acqrel": + self = .init(name: .llvm(.atomic_fsub_acqrel(t))) + case "atomic_fsub_seqcst": + self = .init(name: .llvm(.atomic_fsub_seqcst(t))) + case "atomic_max_relaxed": + self = .init(name: .llvm(.atomic_max_relaxed(t))) + case "atomic_max_acquire": + self = .init(name: .llvm(.atomic_max_acquire(t))) + case "atomic_max_release": + self = .init(name: .llvm(.atomic_max_release(t))) + case "atomic_max_acqrel": + self = .init(name: .llvm(.atomic_max_acqrel(t))) + case "atomic_max_seqcst": + self = .init(name: .llvm(.atomic_max_seqcst(t))) + case "atomic_umax_relaxed": + self = .init(name: .llvm(.atomic_umax_relaxed(t))) + case "atomic_umax_acquire": + self = .init(name: .llvm(.atomic_umax_acquire(t))) + case "atomic_umax_release": + self = .init(name: .llvm(.atomic_umax_release(t))) + case "atomic_umax_acqrel": + self = .init(name: .llvm(.atomic_umax_acqrel(t))) + case "atomic_umax_seqcst": + self = .init(name: .llvm(.atomic_umax_seqcst(t))) + case "atomic_fmax_relaxed": + self = .init(name: .llvm(.atomic_fmax_relaxed(t))) + case "atomic_fmax_acquire": + self = .init(name: .llvm(.atomic_fmax_acquire(t))) + case "atomic_fmax_release": + self = .init(name: .llvm(.atomic_fmax_release(t))) 
+ case "atomic_fmax_acqrel": + self = .init(name: .llvm(.atomic_fmax_acqrel(t))) + case "atomic_fmax_seqcst": + self = .init(name: .llvm(.atomic_fmax_seqcst(t))) + case "atomic_min_relaxed": + self = .init(name: .llvm(.atomic_min_relaxed(t))) + case "atomic_min_acquire": + self = .init(name: .llvm(.atomic_min_acquire(t))) + case "atomic_min_release": + self = .init(name: .llvm(.atomic_min_release(t))) + case "atomic_min_acqrel": + self = .init(name: .llvm(.atomic_min_acqrel(t))) + case "atomic_min_seqcst": + self = .init(name: .llvm(.atomic_min_seqcst(t))) + case "atomic_umin_relaxed": + self = .init(name: .llvm(.atomic_umin_relaxed(t))) + case "atomic_umin_acquire": + self = .init(name: .llvm(.atomic_umin_acquire(t))) + case "atomic_umin_release": + self = .init(name: .llvm(.atomic_umin_release(t))) + case "atomic_umin_acqrel": + self = .init(name: .llvm(.atomic_umin_acqrel(t))) + case "atomic_umin_seqcst": + self = .init(name: .llvm(.atomic_umin_seqcst(t))) + case "atomic_fmin_relaxed": + self = .init(name: .llvm(.atomic_fmin_relaxed(t))) + case "atomic_fmin_acquire": + self = .init(name: .llvm(.atomic_fmin_acquire(t))) + case "atomic_fmin_release": + self = .init(name: .llvm(.atomic_fmin_release(t))) + case "atomic_fmin_acqrel": + self = .init(name: .llvm(.atomic_fmin_acqrel(t))) + case "atomic_fmin_seqcst": + self = .init(name: .llvm(.atomic_fmin_seqcst(t))) + case "atomic_and_relaxed": + self = .init(name: .llvm(.atomic_and_relaxed(t))) + case "atomic_and_acquire": + self = .init(name: .llvm(.atomic_and_acquire(t))) + case "atomic_and_release": + self = .init(name: .llvm(.atomic_and_release(t))) + case "atomic_and_acqrel": + self = .init(name: .llvm(.atomic_and_acqrel(t))) + case "atomic_and_seqcst": + self = .init(name: .llvm(.atomic_and_seqcst(t))) + case "atomic_nand_relaxed": + self = .init(name: .llvm(.atomic_nand_relaxed(t))) + case "atomic_nand_acquire": + self = .init(name: .llvm(.atomic_nand_acquire(t))) + case "atomic_nand_release": + self = 
.init(name: .llvm(.atomic_nand_release(t))) + case "atomic_nand_acqrel": + self = .init(name: .llvm(.atomic_nand_acqrel(t))) + case "atomic_nand_seqcst": + self = .init(name: .llvm(.atomic_nand_seqcst(t))) + case "atomic_or_relaxed": + self = .init(name: .llvm(.atomic_or_relaxed(t))) + case "atomic_or_acquire": + self = .init(name: .llvm(.atomic_or_acquire(t))) + case "atomic_or_release": + self = .init(name: .llvm(.atomic_or_release(t))) + case "atomic_or_acqrel": + self = .init(name: .llvm(.atomic_or_acqrel(t))) + case "atomic_or_seqcst": + self = .init(name: .llvm(.atomic_or_seqcst(t))) + case "atomic_xor_relaxed": + self = .init(name: .llvm(.atomic_xor_relaxed(t))) + case "atomic_xor_acquire": + self = .init(name: .llvm(.atomic_xor_acquire(t))) + case "atomic_xor_release": + self = .init(name: .llvm(.atomic_xor_release(t))) + case "atomic_xor_acqrel": + self = .init(name: .llvm(.atomic_xor_acqrel(t))) + case "atomic_xor_seqcst": + self = .init(name: .llvm(.atomic_xor_seqcst(t))) + case "atomic_cmpxchg_relaxed_relaxed": + self = .init(name: .llvm(.atomic_cmpxchg_relaxed_relaxed(t))) + case "atomic_cmpxchg_relaxed_acquire": + self = .init(name: .llvm(.atomic_cmpxchg_relaxed_acquire(t))) + case "atomic_cmpxchg_relaxed_seqcst": + self = .init(name: .llvm(.atomic_cmpxchg_relaxed_seqcst(t))) + case "atomic_cmpxchg_acquire_relaxed": + self = .init(name: .llvm(.atomic_cmpxchg_acquire_relaxed(t))) + case "atomic_cmpxchg_acquire_acquire": + self = .init(name: .llvm(.atomic_cmpxchg_acquire_acquire(t))) + case "atomic_cmpxchg_acquire_seqcst": + self = .init(name: .llvm(.atomic_cmpxchg_acquire_seqcst(t))) + case "atomic_cmpxchg_release_relaxed": + self = .init(name: .llvm(.atomic_cmpxchg_release_relaxed(t))) + case "atomic_cmpxchg_release_acquire": + self = .init(name: .llvm(.atomic_cmpxchg_release_acquire(t))) + case "atomic_cmpxchg_release_seqcst": + self = .init(name: .llvm(.atomic_cmpxchg_release_seqcst(t))) + case "atomic_cmpxchg_acqrel_relaxed": + self = .init(name: 
.llvm(.atomic_cmpxchg_acqrel_relaxed(t))) + case "atomic_cmpxchg_acqrel_acquire": + self = .init(name: .llvm(.atomic_cmpxchg_acqrel_acquire(t))) + case "atomic_cmpxchg_acqrel_seqcst": + self = .init(name: .llvm(.atomic_cmpxchg_acqrel_seqcst(t))) + case "atomic_cmpxchg_seqcst_relaxed": + self = .init(name: .llvm(.atomic_cmpxchg_seqcst_relaxed(t))) + case "atomic_cmpxchg_seqcst_acquire": + self = .init(name: .llvm(.atomic_cmpxchg_seqcst_acquire(t))) + case "atomic_cmpxchg_seqcst_seqcst": + self = .init(name: .llvm(.atomic_cmpxchg_seqcst_seqcst(t))) + case "atomic_cmpxchgweak_relaxed_relaxed": + self = .init(name: .llvm(.atomic_cmpxchgweak_relaxed_relaxed(t))) + case "atomic_cmpxchgweak_relaxed_acquire": + self = .init(name: .llvm(.atomic_cmpxchgweak_relaxed_acquire(t))) + case "atomic_cmpxchgweak_relaxed_seqcst": + self = .init(name: .llvm(.atomic_cmpxchgweak_relaxed_seqcst(t))) + case "atomic_cmpxchgweak_acquire_relaxed": + self = .init(name: .llvm(.atomic_cmpxchgweak_acquire_relaxed(t))) + case "atomic_cmpxchgweak_acquire_acquire": + self = .init(name: .llvm(.atomic_cmpxchgweak_acquire_acquire(t))) + case "atomic_cmpxchgweak_acquire_seqcst": + self = .init(name: .llvm(.atomic_cmpxchgweak_acquire_seqcst(t))) + case "atomic_cmpxchgweak_release_relaxed": + self = .init(name: .llvm(.atomic_cmpxchgweak_release_relaxed(t))) + case "atomic_cmpxchgweak_release_acquire": + self = .init(name: .llvm(.atomic_cmpxchgweak_release_acquire(t))) + case "atomic_cmpxchgweak_release_seqcst": + self = .init(name: .llvm(.atomic_cmpxchgweak_release_seqcst(t))) + case "atomic_cmpxchgweak_acqrel_relaxed": + self = .init(name: .llvm(.atomic_cmpxchgweak_acqrel_relaxed(t))) + case "atomic_cmpxchgweak_acqrel_acquire": + self = .init(name: .llvm(.atomic_cmpxchgweak_acqrel_acquire(t))) + case "atomic_cmpxchgweak_acqrel_seqcst": + self = .init(name: .llvm(.atomic_cmpxchgweak_acqrel_seqcst(t))) + case "atomic_cmpxchgweak_seqcst_relaxed": + self = .init(name: 
.llvm(.atomic_cmpxchgweak_seqcst_relaxed(t))) + case "atomic_cmpxchgweak_seqcst_acquire": + self = .init(name: .llvm(.atomic_cmpxchgweak_seqcst_acquire(t))) + case "atomic_cmpxchgweak_seqcst_seqcst": + self = .init(name: .llvm(.atomic_cmpxchgweak_seqcst_seqcst(t))) + default: + return nil } } @@ -596,12 +602,6 @@ private func take( } } -/// Splits `s` into a pair `(prefix, suffix)` at the last underscore character, or returns `nil`. -private func splitLastUnderscore(_ s: String) -> (String, String)? { - guard let i = s.lastIndex(of: "_") else { return nil } - return (String(s[..) -> BuiltinType? { stream.popFirst().flatMap(BuiltinType.init(_:)) From a3fd992e3265c968673d49af490921ea8685e2e2 Mon Sep 17 00:00:00 2001 From: Dimi Racordon Date: Mon, 23 Dec 2024 14:16:01 +0100 Subject: [PATCH 12/12] Normalize the swift-case describing built-in atomics --- Sources/FrontEnd/NativeInstruction.swift | 361 +++++++++++++++-------- 1 file changed, 233 insertions(+), 128 deletions(-) diff --git a/Sources/FrontEnd/NativeInstruction.swift b/Sources/FrontEnd/NativeInstruction.swift index 2fe5c8967..7d591a99c 100644 --- a/Sources/FrontEnd/NativeInstruction.swift +++ b/Sources/FrontEnd/NativeInstruction.swift @@ -477,138 +477,243 @@ extension NativeInstruction { return .init(to: ^t) case .advancedByBytes(let byteOffset): return .init(.builtin(.ptr), ^byteOffset, to: .builtin(.ptr)) - case .atomic_store_relaxed(let t), - .atomic_store_release(let t), - .atomic_store_seqcst(let t): + case .atomic_store_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: .void) + case .atomic_store_release(let t): + return .init(.builtin(.ptr), ^t, to: .void) + case .atomic_store_seqcst(let t): return .init(.builtin(.ptr), ^t, to: .void) - case .atomic_load_relaxed(let t), - .atomic_load_acquire(let t), - .atomic_load_seqcst(let t): + case .atomic_load_relaxed(let t): + return .init(.builtin(.ptr), to: ^t) + case .atomic_load_acquire(let t): return .init(.builtin(.ptr), to: ^t) - case 
.atomic_swap_relaxed(let t), - .atomic_swap_acquire(let t), - .atomic_swap_release(let t), - .atomic_swap_acqrel(let t), - .atomic_swap_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: ^t) - case .atomic_add_relaxed(let t), - .atomic_add_acquire(let t), - .atomic_add_release(let t), - .atomic_add_acqrel(let t), - .atomic_add_seqcst(let t), - .atomic_fadd_relaxed(let t), - .atomic_fadd_acquire(let t), - .atomic_fadd_release(let t), - .atomic_fadd_acqrel(let t), - .atomic_fadd_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: ^t) - case .atomic_sub_relaxed(let t), - .atomic_sub_acquire(let t), - .atomic_sub_release(let t), - .atomic_sub_acqrel(let t), - .atomic_sub_seqcst(let t), - .atomic_fsub_relaxed(let t), - .atomic_fsub_acquire(let t), - .atomic_fsub_release(let t), - .atomic_fsub_acqrel(let t), - .atomic_fsub_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: ^t) - case .atomic_max_relaxed(let t), - .atomic_max_acquire(let t), - .atomic_max_release(let t), - .atomic_max_acqrel(let t), - .atomic_max_seqcst(let t), - .atomic_umax_relaxed(let t), - .atomic_umax_acquire(let t), - .atomic_umax_release(let t), - .atomic_umax_acqrel(let t), - .atomic_umax_seqcst(let t), - .atomic_fmax_relaxed(let t), - .atomic_fmax_acquire(let t), - .atomic_fmax_release(let t), - .atomic_fmax_acqrel(let t), - .atomic_fmax_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: ^t) - case .atomic_min_relaxed(let t), - .atomic_min_acquire(let t), - .atomic_min_release(let t), - .atomic_min_acqrel(let t), - .atomic_min_seqcst(let t), - .atomic_umin_relaxed(let t), - .atomic_umin_acquire(let t), - .atomic_umin_release(let t), - .atomic_umin_acqrel(let t), - .atomic_umin_seqcst(let t), - .atomic_fmin_relaxed(let t), - .atomic_fmin_acquire(let t), - .atomic_fmin_release(let t), - .atomic_fmin_acqrel(let t), - .atomic_fmin_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: ^t) - case .atomic_and_relaxed(let t), - .atomic_and_acquire(let t), - .atomic_and_release(let t), - 
.atomic_and_acqrel(let t), - .atomic_and_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: ^t) - case .atomic_nand_relaxed(let t), - .atomic_nand_acquire(let t), - .atomic_nand_release(let t), - .atomic_nand_acqrel(let t), - .atomic_nand_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: ^t) - case .atomic_or_relaxed(let t), - .atomic_or_acquire(let t), - .atomic_or_release(let t), - .atomic_or_acqrel(let t), - .atomic_or_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: ^t) - case .atomic_xor_relaxed(let t), - .atomic_xor_acquire(let t), - .atomic_xor_release(let t), - .atomic_xor_acqrel(let t), - .atomic_xor_seqcst(let t): - return .init(.builtin(.ptr), ^t, to: ^t) - case .atomic_cmpxchg_relaxed_relaxed(let t), - .atomic_cmpxchg_relaxed_acquire(let t), - .atomic_cmpxchg_relaxed_seqcst(let t), - .atomic_cmpxchg_acquire_relaxed(let t), - .atomic_cmpxchg_acquire_acquire(let t), - .atomic_cmpxchg_acquire_seqcst(let t), - .atomic_cmpxchg_release_relaxed(let t), - .atomic_cmpxchg_release_acquire(let t), - .atomic_cmpxchg_release_seqcst(let t), - .atomic_cmpxchg_acqrel_relaxed(let t), - .atomic_cmpxchg_acqrel_acquire(let t), - .atomic_cmpxchg_acqrel_seqcst(let t), - .atomic_cmpxchg_seqcst_relaxed(let t), - .atomic_cmpxchg_seqcst_acquire(let t), - .atomic_cmpxchg_seqcst_seqcst(let t): + case .atomic_load_seqcst(let t): + return .init(.builtin(.ptr), to: ^t) + case .atomic_swap_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_swap_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_swap_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_swap_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_swap_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_add_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_add_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_add_release(let t): + return .init(.builtin(.ptr), 
^t, to: ^t) + case .atomic_add_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_add_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fadd_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fadd_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fadd_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fadd_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fadd_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_sub_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_sub_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_sub_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_sub_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_sub_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fsub_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fsub_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fsub_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fsub_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fsub_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_max_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_max_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_max_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_max_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_max_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_umax_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_umax_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_umax_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_umax_acqrel(let t): 
+ return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_umax_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fmax_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fmax_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fmax_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fmax_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fmax_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_min_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_min_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_min_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_min_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_min_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_umin_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_umin_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_umin_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_umin_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_umin_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fmin_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fmin_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fmin_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fmin_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_fmin_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_and_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_and_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_and_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_and_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) 
+ case .atomic_and_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_nand_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_nand_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_nand_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_nand_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_nand_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_or_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_or_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_or_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_or_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_or_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_xor_relaxed(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_xor_acquire(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_xor_release(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_xor_acqrel(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_xor_seqcst(let t): + return .init(.builtin(.ptr), ^t, to: ^t) + case .atomic_cmpxchg_relaxed_relaxed(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_relaxed_acquire(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_relaxed_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_acquire_relaxed(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_acquire_acquire(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_acquire_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: 
^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_release_relaxed(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_release_acquire(let t): return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) - case .atomic_cmpxchgweak_relaxed_relaxed(let t), - .atomic_cmpxchgweak_relaxed_acquire(let t), - .atomic_cmpxchgweak_relaxed_seqcst(let t), - .atomic_cmpxchgweak_acquire_relaxed(let t), - .atomic_cmpxchgweak_acquire_acquire(let t), - .atomic_cmpxchgweak_acquire_seqcst(let t), - .atomic_cmpxchgweak_release_relaxed(let t), - .atomic_cmpxchgweak_release_acquire(let t), - .atomic_cmpxchgweak_release_seqcst(let t), - .atomic_cmpxchgweak_acqrel_relaxed(let t), - .atomic_cmpxchgweak_acqrel_acquire(let t), - .atomic_cmpxchgweak_acqrel_seqcst(let t), - .atomic_cmpxchgweak_seqcst_relaxed(let t), - .atomic_cmpxchgweak_seqcst_acquire(let t), - .atomic_cmpxchgweak_seqcst_seqcst(let t): + case .atomic_cmpxchg_release_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_acqrel_relaxed(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_acqrel_acquire(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_acqrel_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_seqcst_relaxed(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_seqcst_acquire(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchg_seqcst_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_relaxed_relaxed(let t): + return .init(.builtin(.ptr), ^t, ^t, to: 
^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_relaxed_acquire(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_relaxed_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_acquire_relaxed(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_acquire_acquire(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_acquire_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_release_relaxed(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_release_acquire(let t): return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) - case .atomic_fence_acquire, - .atomic_fence_release, - .atomic_fence_acqrel, - .atomic_fence_seqcst, - .atomic_singlethreadfence_acquire, - .atomic_singlethreadfence_release, - .atomic_singlethreadfence_acqrel, - .atomic_singlethreadfence_seqcst: + case .atomic_cmpxchgweak_release_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_acqrel_relaxed(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_acqrel_acquire(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_acqrel_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_seqcst_relaxed(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_seqcst_acquire(let t): + return .init(.builtin(.ptr), ^t, 
^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_cmpxchgweak_seqcst_seqcst(let t): + return .init(.builtin(.ptr), ^t, ^t, to: ^TupleType(types: [^t, .builtin(.i(1))])) + case .atomic_fence_acquire: + return .init(to: .void) + case .atomic_fence_release: + return .init(to: .void) + case .atomic_fence_acqrel: + return .init(to: .void) + case .atomic_fence_seqcst: + return .init(to: .void) + case .atomic_singlethreadfence_acquire: + return .init(to: .void) + case .atomic_singlethreadfence_release: + return .init(to: .void) + case .atomic_singlethreadfence_acqrel: + return .init(to: .void) + case .atomic_singlethreadfence_seqcst: return .init(to: .void) } }