Skip to content

Commit

Permalink
[vm] Try to expose data races to rr by simulating a store buffer.
Browse files Browse the repository at this point in the history
TEST=ci
Change-Id: I3fc7c27a8aba5c2eeb1de6d01e9156535e767d6d
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/393621
Reviewed-by: Alexander Aprelev <[email protected]>
Commit-Queue: Ryan Macnak <[email protected]>
  • Loading branch information
rmacnak-google authored and Commit Queue committed Nov 26, 2024
1 parent 9d3f7c5 commit e2b7ec2
Show file tree
Hide file tree
Showing 9 changed files with 360 additions and 83 deletions.
1 change: 1 addition & 0 deletions runtime/tests/vm/dart/gc/splay_test.dart
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
// VMOptions=--profiler --no_load_cse --no_dead_store_elimination
// VMOptions=--profiler --test_il_serialization
// VMOptions=--profiler --dontneed_on_sweep
// VMOptions=--profiler --sim_buffer_memory

import "splay_common.dart";

Expand Down
32 changes: 21 additions & 11 deletions runtime/vm/simulator_arm.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@ DEFINE_FLAG(uint64_t,
ULLONG_MAX,
"Instruction address or instruction count to stop simulator at.");

DEFINE_FLAG(bool, sim_buffer_memory, false, "Simulate weak memory ordering.");

// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
// OS in the same way as SNPrint is that the Windows C Run-Time
Expand Down Expand Up @@ -723,7 +725,7 @@ char* SimulatorDebugger::ReadLine(const char* prompt) {

void Simulator::Init() {}

Simulator::Simulator() : exclusive_access_addr_(0), exclusive_access_value_(0) {
Simulator::Simulator() : memory_(FLAG_sim_buffer_memory) {
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
// We allocate the stack here, the size is computed as the sum of
Expand Down Expand Up @@ -992,39 +994,39 @@ void Simulator::UnimplementedInstruction(Instr* instr) {
}

DART_FORCE_INLINE intptr_t Simulator::ReadW(uword addr, Instr* instr) {
return *reinterpret_cast<intptr_t*>(addr);
return memory_.Load<intptr_t>(addr);
}

DART_FORCE_INLINE void Simulator::WriteW(uword addr,
intptr_t value,
Instr* instr) {
*reinterpret_cast<intptr_t*>(addr) = value;
memory_.Store(addr, value);
}

DART_FORCE_INLINE uint16_t Simulator::ReadHU(uword addr, Instr* instr) {
return *reinterpret_cast<uint16_t*>(addr);
return memory_.Load<uint16_t>(addr);
}

DART_FORCE_INLINE int16_t Simulator::ReadH(uword addr, Instr* instr) {
return *reinterpret_cast<int16_t*>(addr);
return memory_.Load<int16_t>(addr);
}

DART_FORCE_INLINE void Simulator::WriteH(uword addr,
uint16_t value,
Instr* instr) {
*reinterpret_cast<uint16_t*>(addr) = value;
memory_.Store(addr, value);
}

DART_FORCE_INLINE uint8_t Simulator::ReadBU(uword addr) {
return *reinterpret_cast<uint8_t*>(addr);
return memory_.Load<uint8_t>(addr);
}

DART_FORCE_INLINE int8_t Simulator::ReadB(uword addr) {
return *reinterpret_cast<int8_t*>(addr);
return memory_.Load<int8_t>(addr);
}

DART_FORCE_INLINE void Simulator::WriteB(uword addr, uint8_t value) {
*reinterpret_cast<uint8_t*>(addr) = value;
memory_.Store(addr, value);
}

void Simulator::ClearExclusive() {
Expand Down Expand Up @@ -1053,8 +1055,8 @@ intptr_t Simulator::WriteExclusiveW(uword addr, intptr_t value, Instr* instr) {
return 1; // Spurious failure.
}

auto atomic_addr = reinterpret_cast<RelaxedAtomic<int32_t>*>(addr);
if (atomic_addr->compare_exchange_weak(old_value, value)) {
if (memory_.CompareExchange(addr, old_value, value,
std::memory_order_relaxed)) {
return 0; // Success.
}
return 1; // Failure.
Expand Down Expand Up @@ -1406,6 +1408,9 @@ typedef void (*SimulatorNativeCallWrapper)(Dart_NativeArguments arguments,
Dart_NativeFunction target);

void Simulator::SupervisorCall(Instr* instr) {
// We can't instrument the runtime.
memory_.FlushAll();

int svc = instr->SvcField();
switch (svc) {
case Instr::kSimulatorRedirectCode: {
Expand Down Expand Up @@ -3393,6 +3398,7 @@ DART_FORCE_INLINE void Simulator::InstructionDecodeImpl(Instr* instr) {
} else if (instr->InstructionBits() ==
static_cast<int32_t>(kDataMemoryBarrier)) {
// Format(instr, "dmb ish");
memory_.FlushAll();
std::atomic_thread_fence(std::memory_order_seq_cst);
} else {
if (instr->IsSIMDDataProcessing()) {
Expand Down Expand Up @@ -3655,6 +3661,10 @@ int64_t Simulator::Call(int32_t entry,
} else {
return_value = Utils::LowHighTo64Bits(get_register(R0), get_register(R1));
}

// We can't instrument the runtime.
memory_.FlushAll();

return return_value;
}

Expand Down
6 changes: 4 additions & 2 deletions runtime/vm/simulator_arm.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@

#include "vm/constants.h"
#include "vm/random.h"
#include "vm/simulator_memory.h"

namespace dart {

Expand Down Expand Up @@ -161,6 +162,7 @@ class Simulator {
static int32_t flag_stop_sim_at_;
Random random_;
SimulatorSetjmpBuffer* last_setjmp_buffer_;
SimulatorMemory memory_;

// Registered breakpoints.
Instr* break_pc_;
Expand Down Expand Up @@ -220,8 +222,8 @@ class Simulator {
// performs atomic compare-and-swap with remembered value to observe value
// changes. This implementation of ldrex/strex instructions does not detect
// ABA situation and our uses of ldrex/strex don't need this detection.
uword exclusive_access_addr_;
uword exclusive_access_value_;
uword exclusive_access_addr_ = 0;
uword exclusive_access_value_ = 0;

// Executing is handled based on the instruction type.
void DecodeType01(Instr* instr); // Both type 0 and type 1 rolled into one.
Expand Down
78 changes: 32 additions & 46 deletions runtime/vm/simulator_arm64.cc
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ DEFINE_FLAG(bool,
sim_allow_unaligned_accesses,
true,
"Allow unaligned accesses to Normal memory.");
DEFINE_FLAG(bool, sim_buffer_memory, false, "Simulate weak memory ordering.");

// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
Expand Down Expand Up @@ -778,7 +779,7 @@ char* SimulatorDebugger::ReadLine(const char* prompt) {

void Simulator::Init() {}

Simulator::Simulator() : exclusive_access_addr_(0), exclusive_access_value_(0) {
Simulator::Simulator() : memory_(FLAG_sim_buffer_memory) {
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
// We allocate the stack here, the size is computed as the sum of
Expand Down Expand Up @@ -1110,15 +1111,15 @@ intptr_t Simulator::ReadX(uword addr,
const bool allow_unaligned_access =
FLAG_sim_allow_unaligned_accesses && !must_be_aligned;
if (allow_unaligned_access || (addr & 7) == 0) {
return LoadUnaligned(reinterpret_cast<intptr_t*>(addr));
return memory_.Load<intptr_t>(addr);
}
UnalignedAccess("read", addr, instr);
return 0;
}

void Simulator::WriteX(uword addr, intptr_t value, Instr* instr) {
if (FLAG_sim_allow_unaligned_accesses || (addr & 7) == 0) {
StoreUnaligned(reinterpret_cast<intptr_t*>(addr), value);
memory_.Store(addr, value);
return;
}
UnalignedAccess("write", addr, instr);
Expand All @@ -1130,65 +1131,62 @@ uint32_t Simulator::ReadWU(uword addr,
const bool allow_unaligned_access =
FLAG_sim_allow_unaligned_accesses && !must_be_aligned;
if (allow_unaligned_access || (addr & 3) == 0) {
return LoadUnaligned(reinterpret_cast<uint32_t*>(addr));
return memory_.Load<uint32_t>(addr);
}
UnalignedAccess("read unsigned single word", addr, instr);
return 0;
}

int32_t Simulator::ReadW(uword addr, Instr* instr) {
if (FLAG_sim_allow_unaligned_accesses || (addr & 3) == 0) {
return LoadUnaligned(reinterpret_cast<int32_t*>(addr));
return memory_.Load<int32_t>(addr);
}
UnalignedAccess("read single word", addr, instr);
return 0;
}

void Simulator::WriteW(uword addr, uint32_t value, Instr* instr) {
if (FLAG_sim_allow_unaligned_accesses || (addr & 3) == 0) {
StoreUnaligned(reinterpret_cast<uint32_t*>(addr), value);
memory_.Store(addr, value);
return;
}
UnalignedAccess("write single word", addr, instr);
}

uint16_t Simulator::ReadHU(uword addr, Instr* instr) {
if (FLAG_sim_allow_unaligned_accesses || (addr & 1) == 0) {
return LoadUnaligned(reinterpret_cast<uint16_t*>(addr));
return memory_.Load<uint16_t>(addr);
}
UnalignedAccess("unsigned halfword read", addr, instr);
return 0;
}

int16_t Simulator::ReadH(uword addr, Instr* instr) {
if (FLAG_sim_allow_unaligned_accesses || (addr & 1) == 0) {
return LoadUnaligned(reinterpret_cast<int16_t*>(addr));
return memory_.Load<int16_t>(addr);
}
UnalignedAccess("signed halfword read", addr, instr);
return 0;
}

void Simulator::WriteH(uword addr, uint16_t value, Instr* instr) {
if (FLAG_sim_allow_unaligned_accesses || (addr & 1) == 0) {
StoreUnaligned(reinterpret_cast<uint16_t*>(addr), value);
memory_.Store(addr, value);
return;
}
UnalignedAccess("halfword write", addr, instr);
}

uint8_t Simulator::ReadBU(uword addr) {
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
return *ptr;
return memory_.Load<uint8_t>(addr);
}

int8_t Simulator::ReadB(uword addr) {
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
return *ptr;
return memory_.Load<int8_t>(addr);
}

void Simulator::WriteB(uword addr, uint8_t value) {
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
*ptr = value;
memory_.Store(addr, value);
}

void Simulator::ClearExclusive() {
Expand All @@ -1202,7 +1200,7 @@ intptr_t Simulator::ReadExclusiveX(uword addr, Instr* instr) {
return exclusive_access_value_;
}

intptr_t Simulator::ReadExclusiveW(uword addr, Instr* instr) {
uint32_t Simulator::ReadExclusiveW(uword addr, Instr* instr) {
exclusive_access_addr_ = addr;
exclusive_access_value_ = ReadWU(addr, instr, /*must_be_aligned=*/true);
return exclusive_access_value_;
Expand All @@ -1223,69 +1221,49 @@ intptr_t Simulator::WriteExclusiveX(uword addr, intptr_t value, Instr* instr) {
return 1; // Spurious failure.
}

auto atomic_addr = reinterpret_cast<RelaxedAtomic<int64_t>*>(addr);
if (atomic_addr->compare_exchange_weak(old_value, value)) {
if (memory_.CompareExchange(addr, old_value, value,
std::memory_order_relaxed)) {
return 0; // Success.
}
return 1; // Failure.
}

intptr_t Simulator::WriteExclusiveW(uword addr, intptr_t value, Instr* instr) {
intptr_t Simulator::WriteExclusiveW(uword addr, uint32_t value, Instr* instr) {
// In a well-formed code store-exclusive instruction should always follow
// a corresponding load-exclusive instruction with the same address.
ASSERT((exclusive_access_addr_ == 0) || (exclusive_access_addr_ == addr));
if (exclusive_access_addr_ != addr) {
return 1; // Failure.
}

int32_t old_value = static_cast<uint32_t>(exclusive_access_value_);
uint32_t old_value = static_cast<uint32_t>(exclusive_access_value_);
ClearExclusive();

if ((random_.NextUInt32() % 16) == 0) {
return 1; // Spurious failure.
}

auto atomic_addr = reinterpret_cast<RelaxedAtomic<int32_t>*>(addr);
if (atomic_addr->compare_exchange_weak(old_value, value)) {
if (memory_.CompareExchange(addr, old_value, value,
std::memory_order_relaxed)) {
return 0; // Success.
}
return 1; // Failure.
}

intptr_t Simulator::ReadAcquire(uword addr, Instr* instr) {
// TODO(42074): Once we switch to C++20 we should change this to use use
// `std::atomic_ref<T>` which supports performing atomic operations on
// non-atomic data.
COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
return reinterpret_cast<std::atomic<intptr_t>*>(addr)->load(
std::memory_order_acquire);
return memory_.Load<uint64_t>(addr, std::memory_order_acquire);
}

uint32_t Simulator::ReadAcquireW(uword addr, Instr* instr) {
// TODO(42074): Once we switch to C++20 we should change this to use use
// `std::atomic_ref<T>` which supports performing atomic operations on
// non-atomic data.
COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
return reinterpret_cast<std::atomic<uint32_t>*>(addr)->load(
std::memory_order_acquire);
return memory_.Load<uint32_t>(addr, std::memory_order_acquire);
}

void Simulator::WriteRelease(uword addr, intptr_t value, Instr* instr) {
// TODO(42074): Once we switch to C++20 we should change this to use use
// `std::atomic_ref<T>` which supports performing atomic operations on
// non-atomic data.
COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
reinterpret_cast<std::atomic<intptr_t>*>(addr)->store(
value, std::memory_order_release);
memory_.Store(addr, value, std::memory_order_release);
}

void Simulator::WriteReleaseW(uword addr, uint32_t value, Instr* instr) {
// TODO(42074): Once we switch to C++20 we should change this to use use
// `std::atomic_ref<T>` which supports performing atomic operations on
// non-atomic data.
COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
reinterpret_cast<std::atomic<uint32_t>*>(addr)->store(
value, std::memory_order_release);
memory_.Store(addr, value, std::memory_order_release);
}

// Unsupported instructions use Format to print an error and stop execution.
Expand Down Expand Up @@ -1695,6 +1673,9 @@ typedef void (*SimulatorNativeCallWrapper)(Dart_NativeArguments arguments,
Dart_NativeFunction target);

void Simulator::DoRedirectedCall(Instr* instr) {
// We can't instrument the runtime.
memory_.FlushAll();

SimulatorSetjmpBuffer buffer(this);
if (!setjmp(buffer.buffer_)) {
int64_t saved_lr = get_register(LR);
Expand Down Expand Up @@ -1823,6 +1804,7 @@ void Simulator::DecodeSystem(Instr* instr) {

if (instr->InstructionBits() == kDataMemoryBarrier) {
// Format(instr, "dmb ish");
memory_.FlushAll();
std::atomic_thread_fence(std::memory_order_seq_cst);
return;
}
Expand Down Expand Up @@ -3907,6 +3889,10 @@ int64_t Simulator::Call(int64_t entry,
} else {
return_value = get_register(R0);
}

// We can't instrument the runtime.
memory_.FlushAll();

return return_value;
}

Expand Down
Loading

0 comments on commit e2b7ec2

Please sign in to comment.