-
Notifications
You must be signed in to change notification settings - Fork 12.9k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[RISCV] Separate HW/SW shadow stack on RISC-V #112478
Conversation
@llvm/pr-subscribers-backend-risc-v Author: Jesse Huang (jaidTw) — Changes: This patch follows #112477. Patch is 27.44 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/112478.diff — 2 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index f388376c12c943..0b0e00ccd24925 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -58,10 +58,14 @@ static const std::pair<MCPhysReg, int8_t> FixedCSRFIMap[] = {
static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const DebugLoc &DL) {
- if (!MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack))
+ const auto &STI = MF.getSubtarget<RISCVSubtarget>();
+ bool HasHWShadowStack =
+ MF.getFunction().hasFnAttribute("hw-shadow-stack") && STI.hasStdExtZimop();
+ bool HasSWShadowStack =
+ MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
+ if (!HasHWShadowStack && !HasSWShadowStack)
return;
- const auto &STI = MF.getSubtarget<RISCVSubtarget>();
const llvm::RISCVRegisterInfo *TRI = STI.getRegisterInfo();
Register RAReg = TRI->getRARegister();
@@ -73,7 +77,7 @@ static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
return;
const RISCVInstrInfo *TII = STI.getInstrInfo();
- if (!STI.hasForcedSWShadowStack() && STI.hasStdExtZicfiss()) {
+ if (HasHWShadowStack) {
BuildMI(MBB, MI, DL, TII->get(RISCV::SSPUSH)).addReg(RAReg);
return;
}
@@ -120,10 +124,14 @@ static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const DebugLoc &DL) {
- if (!MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack))
+ const auto &STI = MF.getSubtarget<RISCVSubtarget>();
+ bool HasHWShadowStack =
+ MF.getFunction().hasFnAttribute("hw-shadow-stack") && STI.hasStdExtZimop();
+ bool HasSWShadowStack =
+ MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
+ if (!HasHWShadowStack && !HasSWShadowStack)
return;
- const auto &STI = MF.getSubtarget<RISCVSubtarget>();
Register RAReg = STI.getRegisterInfo()->getRARegister();
// See emitSCSPrologue() above.
@@ -133,7 +141,7 @@ static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
return;
const RISCVInstrInfo *TII = STI.getInstrInfo();
- if (!STI.hasForcedSWShadowStack() && STI.hasStdExtZicfiss()) {
+ if (HasHWShadowStack) {
BuildMI(MBB, MI, DL, TII->get(RISCV::SSPOPCHK)).addReg(RAReg);
return;
}
diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
index a320b44d2c6a81..f0f26cc715a487 100644
--- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll
+++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
@@ -7,10 +7,6 @@
; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV32-ZICFISS
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicfiss < %s \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV64-ZICFISS
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicfiss,forced-sw-shadow-stack \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicfiss,forced-sw-shadow-stack \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
define void @f1() shadowcallstack {
; RV32-LABEL: f1:
@@ -92,7 +88,9 @@ define i32 @f3() shadowcallstack {
;
; RV32-ZICFISS-LABEL: f3:
; RV32-ZICFISS: # %bb.0:
-; RV32-ZICFISS-NEXT: sspush ra
+; RV32-ZICFISS-NEXT: addi gp, gp, 4
+; RV32-ZICFISS-NEXT: sw ra, -4(gp)
+; RV32-ZICFISS-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
; RV32-ZICFISS-NEXT: addi sp, sp, -16
; RV32-ZICFISS-NEXT: .cfi_def_cfa_offset 16
; RV32-ZICFISS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
@@ -100,12 +98,16 @@ define i32 @f3() shadowcallstack {
; RV32-ZICFISS-NEXT: call bar
; RV32-ZICFISS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ZICFISS-NEXT: addi sp, sp, 16
-; RV32-ZICFISS-NEXT: sspopchk ra
+; RV32-ZICFISS-NEXT: lw ra, -4(gp)
+; RV32-ZICFISS-NEXT: addi gp, gp, -4
+; RV32-ZICFISS-NEXT: .cfi_restore gp
; RV32-ZICFISS-NEXT: ret
;
; RV64-ZICFISS-LABEL: f3:
; RV64-ZICFISS: # %bb.0:
-; RV64-ZICFISS-NEXT: sspush ra
+; RV64-ZICFISS-NEXT: addi gp, gp, 8
+; RV64-ZICFISS-NEXT: sd ra, -8(gp)
+; RV64-ZICFISS-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
; RV64-ZICFISS-NEXT: addi sp, sp, -16
; RV64-ZICFISS-NEXT: .cfi_def_cfa_offset 16
; RV64-ZICFISS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
@@ -113,7 +115,9 @@ define i32 @f3() shadowcallstack {
; RV64-ZICFISS-NEXT: call bar
; RV64-ZICFISS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ZICFISS-NEXT: addi sp, sp, 16
-; RV64-ZICFISS-NEXT: sspopchk ra
+; RV64-ZICFISS-NEXT: ld ra, -8(gp)
+; RV64-ZICFISS-NEXT: addi gp, gp, -8
+; RV64-ZICFISS-NEXT: .cfi_restore gp
; RV64-ZICFISS-NEXT: ret
%res = call i32 @bar()
%res1 = add i32 %res, 1
@@ -193,7 +197,9 @@ define i32 @f4() shadowcallstack {
;
; RV32-ZICFISS-LABEL: f4:
; RV32-ZICFISS: # %bb.0:
-; RV32-ZICFISS-NEXT: sspush ra
+; RV32-ZICFISS-NEXT: addi gp, gp, 4
+; RV32-ZICFISS-NEXT: sw ra, -4(gp)
+; RV32-ZICFISS-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
; RV32-ZICFISS-NEXT: addi sp, sp, -16
; RV32-ZICFISS-NEXT: .cfi_def_cfa_offset 16
; RV32-ZICFISS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
@@ -219,12 +225,16 @@ define i32 @f4() shadowcallstack {
; RV32-ZICFISS-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32-ZICFISS-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32-ZICFISS-NEXT: addi sp, sp, 16
-; RV32-ZICFISS-NEXT: sspopchk ra
+; RV32-ZICFISS-NEXT: lw ra, -4(gp)
+; RV32-ZICFISS-NEXT: addi gp, gp, -4
+; RV32-ZICFISS-NEXT: .cfi_restore gp
; RV32-ZICFISS-NEXT: ret
;
; RV64-ZICFISS-LABEL: f4:
; RV64-ZICFISS: # %bb.0:
-; RV64-ZICFISS-NEXT: sspush ra
+; RV64-ZICFISS-NEXT: addi gp, gp, 8
+; RV64-ZICFISS-NEXT: sd ra, -8(gp)
+; RV64-ZICFISS-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
; RV64-ZICFISS-NEXT: addi sp, sp, -32
; RV64-ZICFISS-NEXT: .cfi_def_cfa_offset 32
; RV64-ZICFISS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
@@ -250,7 +260,9 @@ define i32 @f4() shadowcallstack {
; RV64-ZICFISS-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64-ZICFISS-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64-ZICFISS-NEXT: addi sp, sp, 32
-; RV64-ZICFISS-NEXT: sspopchk ra
+; RV64-ZICFISS-NEXT: ld ra, -8(gp)
+; RV64-ZICFISS-NEXT: addi gp, gp, -8
+; RV64-ZICFISS-NEXT: .cfi_restore gp
; RV64-ZICFISS-NEXT: ret
%res1 = call i32 @bar()
%res2 = call i32 @bar()
@@ -291,16 +303,618 @@ define i32 @f5() shadowcallstack nounwind {
;
; RV32-ZICFISS-LABEL: f5:
; RV32-ZICFISS: # %bb.0:
+; RV32-ZICFISS-NEXT: addi gp, gp, 4
+; RV32-ZICFISS-NEXT: sw ra, -4(gp)
+; RV32-ZICFISS-NEXT: addi sp, sp, -16
+; RV32-ZICFISS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZICFISS-NEXT: call bar
+; RV32-ZICFISS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZICFISS-NEXT: addi sp, sp, 16
+; RV32-ZICFISS-NEXT: lw ra, -4(gp)
+; RV32-ZICFISS-NEXT: addi gp, gp, -4
+; RV32-ZICFISS-NEXT: ret
+;
+; RV64-ZICFISS-LABEL: f5:
+; RV64-ZICFISS: # %bb.0:
+; RV64-ZICFISS-NEXT: addi gp, gp, 8
+; RV64-ZICFISS-NEXT: sd ra, -8(gp)
+; RV64-ZICFISS-NEXT: addi sp, sp, -16
+; RV64-ZICFISS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZICFISS-NEXT: call bar
+; RV64-ZICFISS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZICFISS-NEXT: addi sp, sp, 16
+; RV64-ZICFISS-NEXT: ld ra, -8(gp)
+; RV64-ZICFISS-NEXT: addi gp, gp, -8
+; RV64-ZICFISS-NEXT: ret
+ %res = call i32 @bar()
+ %res1 = add i32 %res, 1
+ ret i32 %res
+}
+
+define void @f1_hw() "hw-shadow-stack" {
+; RV32-LABEL: f1_hw:
+; RV32: # %bb.0:
+; RV32-NEXT: ret
+;
+; RV64-LABEL: f1_hw:
+; RV64: # %bb.0:
+; RV64-NEXT: ret
+;
+; RV32-ZICFISS-LABEL: f1_hw:
+; RV32-ZICFISS: # %bb.0:
+; RV32-ZICFISS-NEXT: ret
+;
+; RV64-ZICFISS-LABEL: f1_hw:
+; RV64-ZICFISS: # %bb.0:
+; RV64-ZICFISS-NEXT: ret
+ ret void
+}
+
+define void @f2_hw() "hw-shadow-stack" {
+; RV32-LABEL: f2_hw:
+; RV32: # %bb.0:
+; RV32-NEXT: tail foo
+;
+; RV64-LABEL: f2_hw:
+; RV64: # %bb.0:
+; RV64-NEXT: tail foo
+;
+; RV32-ZICFISS-LABEL: f2_hw:
+; RV32-ZICFISS: # %bb.0:
+; RV32-ZICFISS-NEXT: tail foo
+;
+; RV64-ZICFISS-LABEL: f2_hw:
+; RV64-ZICFISS: # %bb.0:
+; RV64-ZICFISS-NEXT: tail foo
+ tail call void @foo()
+ ret void
+}
+
+define i32 @f3_hw() "hw-shadow-stack" {
+; RV32-LABEL: f3_hw:
+; RV32: # %bb.0:
+; RV32-NEXT: addi gp, gp, 4
+; RV32-NEXT: sw ra, -4(gp)
+; RV32-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: call bar
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: lw ra, -4(gp)
+; RV32-NEXT: addi gp, gp, -4
+; RV32-NEXT: .cfi_restore gp
+; RV32-NEXT: ret
+;
+; RV64-LABEL: f3_hw:
+; RV64: # %bb.0:
+; RV64-NEXT: addi gp, gp, 8
+; RV64-NEXT: sd ra, -8(gp)
+; RV64-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: call bar
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ld ra, -8(gp)
+; RV64-NEXT: addi gp, gp, -8
+; RV64-NEXT: .cfi_restore gp
+; RV64-NEXT: ret
+;
+; RV32-ZICFISS-LABEL: f3_hw:
+; RV32-ZICFISS: # %bb.0:
; RV32-ZICFISS-NEXT: sspush ra
; RV32-ZICFISS-NEXT: addi sp, sp, -16
+; RV32-ZICFISS-NEXT: .cfi_def_cfa_offset 16
; RV32-ZICFISS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZICFISS-NEXT: .cfi_offset ra, -4
; RV32-ZICFISS-NEXT: call bar
; RV32-ZICFISS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ZICFISS-NEXT: addi sp, sp, 16
; RV32-ZICFISS-NEXT: sspopchk ra
; RV32-ZICFISS-NEXT: ret
;
-; RV64-ZICFISS-LABEL: f5:
+; RV64-ZICFISS-LABEL: f3_hw:
+; RV64-ZICFISS: # %bb.0:
+; RV64-ZICFISS-NEXT: sspush ra
+; RV64-ZICFISS-NEXT: addi sp, sp, -16
+; RV64-ZICFISS-NEXT: .cfi_def_cfa_offset 16
+; RV64-ZICFISS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZICFISS-NEXT: .cfi_offset ra, -8
+; RV64-ZICFISS-NEXT: call bar
+; RV64-ZICFISS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZICFISS-NEXT: addi sp, sp, 16
+; RV64-ZICFISS-NEXT: sspopchk ra
+; RV64-ZICFISS-NEXT: ret
+ %res = call i32 @bar()
+ %res1 = add i32 %res, 1
+ ret i32 %res
+}
+
+define i32 @f4_hw() "hw-shadow-stack" {
+; RV32-LABEL: f4_hw:
+; RV32: # %bb.0:
+; RV32-NEXT: addi gp, gp, 4
+; RV32-NEXT: sw ra, -4(gp)
+; RV32-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: call bar
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: call bar
+; RV32-NEXT: mv s1, a0
+; RV32-NEXT: call bar
+; RV32-NEXT: mv s2, a0
+; RV32-NEXT: call bar
+; RV32-NEXT: add s0, s0, s1
+; RV32-NEXT: add a0, s2, a0
+; RV32-NEXT: add a0, s0, a0
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: lw ra, -4(gp)
+; RV32-NEXT: addi gp, gp, -4
+; RV32-NEXT: .cfi_restore gp
+; RV32-NEXT: ret
+;
+; RV64-LABEL: f4_hw:
+; RV64: # %bb.0:
+; RV64-NEXT: addi gp, gp, 8
+; RV64-NEXT: sd ra, -8(gp)
+; RV64-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
+; RV64-NEXT: addi sp, sp, -32
+; RV64-NEXT: .cfi_def_cfa_offset 32
+; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: call bar
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: call bar
+; RV64-NEXT: mv s1, a0
+; RV64-NEXT: call bar
+; RV64-NEXT: mv s2, a0
+; RV64-NEXT: call bar
+; RV64-NEXT: add s0, s0, s1
+; RV64-NEXT: add a0, s2, a0
+; RV64-NEXT: addw a0, s0, a0
+; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 32
+; RV64-NEXT: ld ra, -8(gp)
+; RV64-NEXT: addi gp, gp, -8
+; RV64-NEXT: .cfi_restore gp
+; RV64-NEXT: ret
+;
+; RV32-ZICFISS-LABEL: f4_hw:
+; RV32-ZICFISS: # %bb.0:
+; RV32-ZICFISS-NEXT: sspush ra
+; RV32-ZICFISS-NEXT: addi sp, sp, -16
+; RV32-ZICFISS-NEXT: .cfi_def_cfa_offset 16
+; RV32-ZICFISS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZICFISS-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-ZICFISS-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-ZICFISS-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32-ZICFISS-NEXT: .cfi_offset ra, -4
+; RV32-ZICFISS-NEXT: .cfi_offset s0, -8
+; RV32-ZICFISS-NEXT: .cfi_offset s1, -12
+; RV32-ZICFISS-NEXT: .cfi_offset s2, -16
+; RV32-ZICFISS-NEXT: call bar
+; RV32-ZICFISS-NEXT: mv s0, a0
+; RV32-ZICFISS-NEXT: call bar
+; RV32-ZICFISS-NEXT: mv s1, a0
+; RV32-ZICFISS-NEXT: call bar
+; RV32-ZICFISS-NEXT: mv s2, a0
+; RV32-ZICFISS-NEXT: call bar
+; RV32-ZICFISS-NEXT: add s0, s0, s1
+; RV32-ZICFISS-NEXT: add a0, s2, a0
+; RV32-ZICFISS-NEXT: add a0, s0, a0
+; RV32-ZICFISS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZICFISS-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-ZICFISS-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-ZICFISS-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32-ZICFISS-NEXT: addi sp, sp, 16
+; RV32-ZICFISS-NEXT: sspopchk ra
+; RV32-ZICFISS-NEXT: ret
+;
+; RV64-ZICFISS-LABEL: f4_hw:
+; RV64-ZICFISS: # %bb.0:
+; RV64-ZICFISS-NEXT: sspush ra
+; RV64-ZICFISS-NEXT: addi sp, sp, -32
+; RV64-ZICFISS-NEXT: .cfi_def_cfa_offset 32
+; RV64-ZICFISS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-ZICFISS-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-ZICFISS-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-ZICFISS-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
+; RV64-ZICFISS-NEXT: .cfi_offset ra, -8
+; RV64-ZICFISS-NEXT: .cfi_offset s0, -16
+; RV64-ZICFISS-NEXT: .cfi_offset s1, -24
+; RV64-ZICFISS-NEXT: .cfi_offset s2, -32
+; RV64-ZICFISS-NEXT: call bar
+; RV64-ZICFISS-NEXT: mv s0, a0
+; RV64-ZICFISS-NEXT: call bar
+; RV64-ZICFISS-NEXT: mv s1, a0
+; RV64-ZICFISS-NEXT: call bar
+; RV64-ZICFISS-NEXT: mv s2, a0
+; RV64-ZICFISS-NEXT: call bar
+; RV64-ZICFISS-NEXT: add s0, s0, s1
+; RV64-ZICFISS-NEXT: add a0, s2, a0
+; RV64-ZICFISS-NEXT: addw a0, s0, a0
+; RV64-ZICFISS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-ZICFISS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-ZICFISS-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-ZICFISS-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
+; RV64-ZICFISS-NEXT: addi sp, sp, 32
+; RV64-ZICFISS-NEXT: sspopchk ra
+; RV64-ZICFISS-NEXT: ret
+ %res1 = call i32 @bar()
+ %res2 = call i32 @bar()
+ %res3 = call i32 @bar()
+ %res4 = call i32 @bar()
+ %res12 = add i32 %res1, %res2
+ %res34 = add i32 %res3, %res4
+ %res1234 = add i32 %res12, %res34
+ ret i32 %res1234
+}
+
+define i32 @f5_hw() "hw-shadow-stack" nounwind {
+; RV32-LABEL: f5_hw:
+; RV32: # %bb.0:
+; RV32-NEXT: addi gp, gp, 4
+; RV32-NEXT: sw ra, -4(gp)
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: call bar
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: lw ra, -4(gp)
+; RV32-NEXT: addi gp, gp, -4
+; RV32-NEXT: ret
+;
+; RV64-LABEL: f5_hw:
+; RV64: # %bb.0:
+; RV64-NEXT: addi gp, gp, 8
+; RV64-NEXT: sd ra, -8(gp)
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: call bar
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ld ra, -8(gp)
+; RV64-NEXT: addi gp, gp, -8
+; RV64-NEXT: ret
+;
+; RV32-ZICFISS-LABEL: f5_hw:
+; RV32-ZICFISS: # %bb.0:
+; RV32-ZICFISS-NEXT: sspush ra
+; RV32-ZICFISS-NEXT: addi sp, sp, -16
+; RV32-ZICFISS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZICFISS-NEXT: call bar
+; RV32-ZICFISS-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ZICFISS-NEXT: addi sp, sp, 16
+; RV32-ZICFISS-NEXT: sspopchk ra
+; RV32-ZICFISS-NEXT: ret
+;
+; RV64-ZICFISS-LABEL: f5_hw:
+; RV64-ZICFISS: # %bb.0:
+; RV64-ZICFISS-NEXT: sspush ra
+; RV64-ZICFISS-NEXT: addi sp, sp, -16
+; RV64-ZICFISS-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-ZICFISS-NEXT: call bar
+; RV64-ZICFISS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-ZICFISS-NEXT: addi sp, sp, 16
+; RV64-ZICFISS-NEXT: sspopchk ra
+; RV64-ZICFISS-NEXT: ret
+ %res = call i32 @bar()
+ %res1 = add i32 %res, 1
+ ret i32 %res
+}
+
+define void @f1_both() "hw-shadow-stack" shadowcallstack {
+; RV32-LABEL: f1_both:
+; RV32: # %bb.0:
+; RV32-NEXT: ret
+;
+; RV64-LABEL: f1_both:
+; RV64: # %bb.0:
+; RV64-NEXT: ret
+;
+; RV32-ZICFISS-LABEL: f1_both:
+; RV32-ZICFISS: # %bb.0:
+; RV32-ZICFISS-NEXT: ret
+;
+; RV64-ZICFISS-LABEL: f1_both:
+; RV64-ZICFISS: # %bb.0:
+; RV64-ZICFISS-NEXT: ret
+ ret void
+}
+
+define void @f2_both() "hw-shadow-stack" shadowcallstack {
+; RV32-LABEL: f2_both:
+; RV32: # %bb.0:
+; RV32-NEXT: tail foo
+;
+; RV64-LABEL: f2_both:
+; RV64: # %bb.0:
+; RV64-NEXT: tail foo
+;
+; RV32-ZICFISS-LABEL: f2_both:
+; RV32-ZICFISS: # %bb.0:
+; RV32-ZICFISS-NEXT: tail foo
+;
+; RV64-ZICFISS-LABEL: f2_both:
+; RV64-ZICFISS: # %bb.0:
+; RV64-ZICFISS-NEXT: tail foo
+ tail call void @foo()
+ ret void
+}
+
+define i32 @f3_both() "hw-shadow-stack" shadowcallstack {
+; RV32-LABEL: f3_both:
+; RV32: # %bb.0:
+; RV32-NEXT: addi gp, gp, 4
+; RV32-NEXT: sw ra, -4(gp)
+; RV32-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: call bar
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: lw ra, -4(gp)
+; RV32-NEXT: addi gp, gp, -4
+; RV32-NEXT: .cfi_restore gp
+; RV32-NEXT: ret
+;
+; RV64-LABEL: f3_both:
+; RV64: # %bb.0:
+; RV64-NEXT: addi gp, gp, 8
+; RV64-NEXT: sd ra, -8(gp)
+; RV64-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: call bar
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ld ra, -8(gp)
+; RV64-NEXT: addi gp, gp, -8
+; RV64-NEXT: .cfi_restore gp
+; RV64-NEXT: ret
+;
+; RV32-ZICFISS-LABEL: f3_both:
+; RV32-ZICFISS: # %bb.0:
+; RV32-ZICFISS-NEXT: sspush ra
+; RV32-ZICFISS-NEXT: addi sp, sp, -16
+; RV32-ZICFISS-NEXT: .cfi_def_cfa_offset 16
+; RV32-ZICFISS-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ZICFISS-NEXT: .cfi_offset ra, -4
+; RV32-ZIC...
[truncated]
|
✅ With the latest revision this PR passed the C/C++ code formatter. |
2af9b78
to
dfcfa43
Compare
@kito-cheng Do you think it's appropriate to change the dependence of shadow stack related features to Zimop at this point? |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
LGTM |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
This reverts commit 700c43aeded6f853ed4bd701cca0b84fdffca7b7.
e2ded49
to
d87c39b
Compare
d87c39b
to
443f392
Compare
This option was used to override the behavior of `-fsanitize=shadowcallstack` on the RISC-V backend, which by default uses a hardware implementation if possible, so that the software implementation is used instead. After #112477 and #112478, the two implementations are represented by independent options and we no longer need it.
This patch follows llvm#112477. Previously `-fsanitize=shadow-call-stack` (which gets transformed to `Attribute::ShadowCallStack`) was used to enable both the hardware and the software shadow stack, and another option `-force-sw-shadow-stack` was needed if the user wanted to use the software shadow stack where the hardware shadow stack could be supported. This patch decouples the two by using the string attribute `hw-shadow-stack` to distinguish the hardware shadow stack from the software shadow stack attribute.
This option was used to override the behavior of `-fsanitize=shadowcallstack` on the RISC-V backend, which by default uses a hardware implementation if possible, so that the software implementation is used instead. After llvm#112477 and llvm#112478, the two implementations are represented by independent options and we no longer need it.
This patch follows #112477.
Previously `-fsanitize=shadow-call-stack` (which gets transformed to `Attribute::ShadowCallStack`) was used to enable both the hardware and the software shadow stack, and another option `-force-sw-shadow-stack` was needed if the user wanted to use the software shadow stack where the hardware shadow stack could be supported. This patch decouples the two by using the string attribute `hw-shadow-stack` to distinguish the hardware shadow stack from the software shadow stack attribute.