From 9fc280a7f6dcaf57d9a94d663a6d62c7af1a1f90 Mon Sep 17 00:00:00 2001
From: Tess Strickland
Date: Tue, 2 Apr 2024 14:54:13 +0000
Subject: [PATCH] [vm/compiler] Add all Compressed Assembler methods to AssemblerBase.

Remove CompareWithCompressedFieldFromOffset, which has no uses.

Rename the LoadFromOffset and StoreToOffset methods that took Addresses
to Load and Store, respectively. This makes the names of the Assembler
methods more uniform:

* Takes an address: Load, Store, LoadField, LoadCompressedField,
  StoreIntoObject, StoreCompressedIntoObject, LoadSmi, LoadCompressedSmi,
  etc.
* Takes a base register and an offset: LoadFromOffset, StoreToOffset,
  LoadFieldFromOffset, LoadCompressedFieldFromOffset,
  StoreIntoObjectOffset, StoreCompressedIntoObjectOffset,
  LoadSmiFromOffset, LoadCompressedSmiFromOffset, etc.

Create AssemblerBase methods for loading and storing compressed pointers
that weren't already there, as well as the corresponding methods for
loading and storing uncompressed values.

Make non-virtual methods that load and store uncompressed fields by
calling the corresponding method for loading from or storing to a memory
region, adjusting the address or offset accordingly. This avoids needing
per-architecture overrides for these.

Make non-virtual methods that load compressed fields by calling the
corresponding method for loading a compressed value from a memory region.
(Since compressed pointers are only stored in Dart objects, and stores
into a Dart object may require a barrier, there is no method for storing
a compressed value into an arbitrary memory region.)

Create pure virtual methods for loading from or storing to an Address,
and for any method that does not have both an Address-taking version and
a version taking a base register and offset pair (e.g., LoadAcquire).

Create methods for loading from or storing to a base register and an
offset. The base implementation creates an Address from the base register
and offset, then calls the Address-taking equivalent. These methods are
non-virtual when the implementation is the same on all architectures and
virtual to allow overriding when necessary.

Make a non-virtual method for loading uncompressed Smis, since all
architectures have the same code for this, including the DEBUG check.

If compressed pointers are not being used, all the methods for compressed
pointers are non-virtual methods that call the corresponding method for
uncompressed values.

If compressed pointers are being used:

* Install pure virtual methods for loading compressed values from and
  storing compressed values to an Address, and for any method that does
  not have both an Address-taking version and a version taking a base
  register and offset pair (e.g., LoadAcquireCompressed).
* Install virtual methods for loading compressed values from and storing
  compressed values to a base register and offset. Like the uncompressed
  versions, the base implementations of these create an Address and call
  the Address-taking equivalent; these implementations are overridden on
  ARM64.
* Install a non-virtual method for loading compressed Smis, since the
  only difference is that it loads a zero-extended 32-bit value, which
  AssemblerBase can do.
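For illustration only (not part of this change): the sketch below shows the
layering described above, using simplified stand-in types and names rather
than the real Dart VM assembler classes. It compiles on its own;
DART_COMPRESSED_POINTERS is assumed to be the usual build-time define that
selects compressed pointers.

  // Minimal sketch of the AssemblerBase layering; all names are stand-ins.
  #include <cstdint>
  #include <cstdio>

  using Register = int;
  constexpr int32_t kHeapObjectTag = 1;

  struct Address {
    Register base;
    int32_t offset;
  };

  class AssemblerBaseSketch {
   public:
    virtual ~AssemblerBaseSketch() = default;

    // Pure virtual: each architecture emits the actual memory access.
    virtual void Load(Register dst, const Address& addr) = 0;
    virtual void Store(Register src, const Address& addr) = 0;

    // Virtual with a shared default: build an Address from the base register
    // and offset, then delegate. Architectures with extra constraints
    // (e.g. limited offset ranges) can override.
    virtual void LoadFromOffset(Register dst, Register base, int32_t offset) {
      Load(dst, Address{base, offset});
    }

    // Non-virtual: the Field variant only adjusts for the object tag, so no
    // per-architecture override is needed.
    void LoadFieldFromOffset(Register dst, Register base, int32_t offset) {
      LoadFromOffset(dst, base, offset - kHeapObjectTag);
    }

  #if defined(DART_COMPRESSED_POINTERS)
    // Pure virtual when pointers are compressed: the backend must zero-extend
    // and restore the upper heap bits itself.
    virtual void LoadCompressed(Register dst, const Address& addr) = 0;
  #else
    // Without compressed pointers, a compressed load is an ordinary load.
    void LoadCompressed(Register dst, const Address& addr) { Load(dst, addr); }
  #endif
  };

  // A hypothetical backend supplying the pure virtual pieces.
  class AssemblerSketch : public AssemblerBaseSketch {
   public:
    void Load(Register dst, const Address& addr) override {
      std::printf("load r%d <- [r%d + %d]\n", dst, addr.base, (int)addr.offset);
    }
    void Store(Register src, const Address& addr) override {
      std::printf("store [r%d + %d] <- r%d\n", addr.base, (int)addr.offset, src);
    }
  #if defined(DART_COMPRESSED_POINTERS)
    void LoadCompressed(Register dst, const Address& addr) override {
      Load(dst, addr);  // A real backend would also fix up the heap bits.
    }
  #endif
  };

  int main() {
    AssemblerSketch assembler;
    // Field offsets are tagged; the non-virtual helper untags before delegating.
    assembler.LoadFieldFromOffset(/*dst=*/0, /*base=*/1, /*offset=*/9);
    assembler.Store(/*src=*/0, Address{/*base=*/2, /*offset=*/16});
    return 0;
  }

The sketch mirrors the design choice in this CL: only the Address-taking
primitives (and, when necessary, the offset-taking variants) are virtual,
while Field and Smi helpers stay non-virtual wrappers in the base class.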
TEST=ci (refactoring only) Change-Id: I934791d26a6e2cdaa6ac5f188b0fd89dbdc491d1 Cq-Include-Trybots: luci.dart.try:vm-aot-android-release-arm64c-try,vm-aot-android-release-arm_x64-try,vm-aot-linux-debug-x64-try,vm-aot-linux-debug-x64c-try,vm-aot-mac-release-arm64-try,vm-aot-mac-release-x64-try,vm-aot-obfuscate-linux-release-x64-try,vm-aot-optimization-level-linux-release-x64-try,vm-aot-win-debug-arm64-try,vm-appjit-linux-debug-x64-try,vm-asan-linux-release-x64-try,vm-checked-mac-release-arm64-try,vm-eager-optimization-linux-release-ia32-try,vm-eager-optimization-linux-release-x64-try,vm-ffi-android-debug-arm-try,vm-ffi-android-debug-arm64c-try,vm-ffi-qemu-linux-release-arm-try,vm-ffi-qemu-linux-release-riscv64-try,vm-linux-debug-ia32-try,vm-linux-debug-x64c-try,vm-mac-debug-arm64-try,vm-mac-debug-x64-try,vm-msan-linux-release-x64-try,vm-reload-linux-debug-x64-try,vm-reload-rollback-linux-debug-x64-try,vm-ubsan-linux-release-x64-try,vm-win-debug-arm64-try,vm-win-debug-x64-try,vm-win-release-ia32-try Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/359861 Reviewed-by: Daco Harkes Commit-Queue: Tess Strickland Reviewed-by: Alexander Markov --- runtime/vm/compiler/asm_intrinsifier.cc | 2 +- .../vm/compiler/assembler/assembler_arm.cc | 36 +- runtime/vm/compiler/assembler/assembler_arm.h | 146 +++---- .../vm/compiler/assembler/assembler_arm64.cc | 84 ++-- .../vm/compiler/assembler/assembler_arm64.h | 159 +++---- .../vm/compiler/assembler/assembler_base.cc | 189 +++++++-- .../vm/compiler/assembler/assembler_base.h | 399 +++++++++++++++--- .../vm/compiler/assembler/assembler_ia32.cc | 20 +- .../vm/compiler/assembler/assembler_ia32.h | 119 ++---- .../vm/compiler/assembler/assembler_riscv.cc | 98 +---- .../vm/compiler/assembler/assembler_riscv.h | 147 +------ .../vm/compiler/assembler/assembler_x64.cc | 34 +- runtime/vm/compiler/assembler/assembler_x64.h | 161 ++----- .../compiler/backend/flow_graph_compiler.cc | 12 +- runtime/vm/compiler/backend/il.cc | 5 +- runtime/vm/compiler/backend/il_arm.cc | 12 +- runtime/vm/compiler/backend/il_arm64.cc | 5 +- runtime/vm/compiler/backend/il_ia32.cc | 10 +- runtime/vm/compiler/backend/il_riscv.cc | 51 +-- runtime/vm/compiler/backend/il_x64.cc | 6 +- runtime/vm/compiler/stub_code_compiler.cc | 170 ++++---- runtime/vm/compiler/stub_code_compiler_arm.cc | 7 +- .../vm/compiler/stub_code_compiler_arm64.cc | 7 +- .../vm/compiler/stub_code_compiler_ia32.cc | 2 +- .../vm/compiler/stub_code_compiler_riscv.cc | 7 +- runtime/vm/compiler/stub_code_compiler_x64.cc | 7 +- 26 files changed, 904 insertions(+), 991 deletions(-) diff --git a/runtime/vm/compiler/asm_intrinsifier.cc b/runtime/vm/compiler/asm_intrinsifier.cc index 8ebe07386b63..64da178689b6 100644 --- a/runtime/vm/compiler/asm_intrinsifier.cc +++ b/runtime/vm/compiler/asm_intrinsifier.cc @@ -45,7 +45,7 @@ void AsmIntrinsifier::StringEquality(Assembler* assembler, __ CompareClassId(obj2, string_cid, temp1); __ BranchIf(NOT_EQUAL, normal_ir_body, AssemblerBase::kNearJump); - __ LoadFromOffset(temp1, FieldAddress(obj1, target::String::length_offset())); + __ LoadFieldFromOffset(temp1, obj1, target::String::length_offset()); __ CompareWithMemoryValue( temp1, FieldAddress(obj2, target::String::length_offset())); __ BranchIf(NOT_EQUAL, &is_false, AssemblerBase::kNearJump); diff --git a/runtime/vm/compiler/assembler/assembler_arm.cc b/runtime/vm/compiler/assembler/assembler_arm.cc index 1b7fb32e4e44..e5e5f0c64b1c 100644 --- a/runtime/vm/compiler/assembler/assembler_arm.cc +++ 
b/runtime/vm/compiler/assembler/assembler_arm.cc @@ -1773,7 +1773,7 @@ void Assembler::StoreIntoObject(Register object, if (memory_order == kRelease) { StoreRelease(value, dest); } else { - StoreToOffset(value, dest); + Store(value, dest); } // In parallel, test whether @@ -1909,7 +1909,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object, if (memory_order == kRelease) { StoreRelease(value, dest); } else { - StoreToOffset(value, dest); + Store(value, dest); } #if defined(DEBUG) // We can't assert the incremental barrier is not needed here, only the @@ -2030,7 +2030,7 @@ void Assembler::StoreIntoSmiField(const Address& dest, Register value) { Stop("New value must be Smi."); Bind(&done); #endif // defined(DEBUG) - StoreToOffset(value, dest); + Store(value, dest); } void Assembler::ExtractClassIdFromTags(Register result, @@ -2280,16 +2280,6 @@ void Assembler::Bind(Label* label) { BindARMv7(label); } -void Assembler::LoadCompressedSmi(Register dest, const Address& slot) { - ldr(dest, slot); -#if defined(DEBUG) - Label done; - BranchIfSmi(dest, &done, kNearJump); - Stop("Expected Smi"); - Bind(&done); -#endif -} - OperandSize Address::OperandSizeFor(intptr_t cid) { auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid); switch (rep) { @@ -2890,10 +2880,10 @@ Address Assembler::PrepareLargeStoreOffset(const Address& address, return Address(base, offset, mode); } -void Assembler::LoadFromOffset(Register reg, - const Address& address, - OperandSize size, - Condition cond) { +void Assembler::Load(Register reg, + const Address& address, + OperandSize size, + Condition cond) { const Address& addr = PrepareLargeLoadOffset(address, size, cond); switch (size) { case kByte: @@ -2932,10 +2922,10 @@ void Assembler::CompareToStack(Register src, intptr_t depth) { CompareRegisters(src, TMP); } -void Assembler::StoreToOffset(Register reg, - const Address& address, - OperandSize size, - Condition cond) { +void Assembler::Store(Register reg, + const Address& address, + OperandSize size, + Condition cond) { const Address& addr = PrepareLargeStoreOffset(address, size, cond); switch (size) { case kUnsignedByte: @@ -3866,8 +3856,8 @@ void Assembler::LoadElementAddressForRegIndex(Register address, void Assembler::LoadStaticFieldAddress(Register address, Register field, Register scratch) { - LoadCompressedFieldFromOffset( - scratch, field, target::Field::host_offset_or_field_id_offset()); + LoadFieldFromOffset(scratch, field, + target::Field::host_offset_or_field_id_offset()); const intptr_t field_table_offset = compiler::target::Thread::field_table_values_offset(); LoadMemoryValue(address, THR, static_cast(field_table_offset)); diff --git a/runtime/vm/compiler/assembler/assembler_arm.h b/runtime/vm/compiler/assembler/assembler_arm.h index 7371cd066b9e..bb019d7635c0 100644 --- a/runtime/vm/compiler/assembler/assembler_arm.h +++ b/runtime/vm/compiler/assembler/assembler_arm.h @@ -415,7 +415,7 @@ class Assembler : public AssemblerBase { void PushValueAtOffset(Register base, int32_t offset) { UNIMPLEMENTED(); } - void Bind(Label* label); + void Bind(Label* label) override; // Unconditional jump to a given label. [distance] is ignored on ARM. void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); } // Unconditional jump to a given address in register. @@ -423,16 +423,9 @@ class Assembler : public AssemblerBase { // Unconditional jump to a given address in memory. 
void Jump(const Address& address) { Branch(address); } - void LoadField(Register dst, const FieldAddress& address) override { - LoadFromOffset(dst, address); - } void LoadMemoryValue(Register dst, Register base, int32_t offset) { LoadFromOffset(dst, base, offset); } - void LoadCompressed(Register dest, const Address& slot) { - LoadFromOffset(dest, slot); - } - void LoadCompressedSmi(Register dest, const Address& slot) override; void StoreMemoryValue(Register src, Register base, int32_t offset) { StoreToOffset(src, base, offset); } @@ -440,7 +433,7 @@ class Assembler : public AssemblerBase { Register address, int32_t offset = 0, OperandSize size = kFourBytes) override { - LoadFromOffset(dst, Address(address, offset), size); + Load(dst, Address(address, offset), size); dmb(); } void StoreRelease(Register src, @@ -450,23 +443,16 @@ class Assembler : public AssemblerBase { } void StoreRelease(Register src, Address dest) { dmb(); - StoreToOffset(src, dest); + Store(src, dest); // We don't run TSAN bots on 32 bit. } - void CompareWithCompressedFieldFromOffset(Register value, - Register base, - int32_t offset) { - LoadCompressedFieldFromOffset(TMP, base, offset); - cmp(value, Operand(TMP)); - } - void CompareWithMemoryValue(Register value, Address address, OperandSize size = kFourBytes) override { ASSERT_EQUAL(size, kFourBytes); - LoadFromOffset(TMP, address, size); + Load(TMP, address, size); cmp(value, Operand(TMP)); } @@ -1022,32 +1008,34 @@ class Assembler : public AssemblerBase { void StoreIntoArray(Register object, Register slot, Register value, - CanBeSmi can_value_be_smi = kValueCanBeSmi); - void StoreIntoObjectOffset(Register object, - int32_t offset, - Register value, - CanBeSmi can_value_be_smi = kValueCanBeSmi, - MemoryOrder memory_order = kRelaxedNonAtomic); + CanBeSmi can_value_be_smi = kValueCanBeSmi) override; + void StoreIntoObjectOffset( + Register object, + int32_t offset, + Register value, + CanBeSmi can_value_be_smi = kValueCanBeSmi, + MemoryOrder memory_order = kRelaxedNonAtomic) override; void StoreIntoObjectNoBarrier( Register object, const Address& dest, Register value, MemoryOrder memory_order = kRelaxedNonAtomic) override; - void StoreIntoObjectNoBarrier(Register object, - const Address& dest, - const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); + void StoreIntoObjectNoBarrier( + Register object, + const Address& dest, + const Object& value, + MemoryOrder memory_order = kRelaxedNonAtomic) override; void StoreIntoObjectOffsetNoBarrier( Register object, int32_t offset, Register value, - MemoryOrder memory_order = kRelaxedNonAtomic); + MemoryOrder memory_order = kRelaxedNonAtomic) override; void StoreIntoObjectOffsetNoBarrier( Register object, int32_t offset, const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); + MemoryOrder memory_order = kRelaxedNonAtomic) override; // Stores a non-tagged value into a heap object. 
void StoreInternalPointer(Register object, @@ -1106,46 +1094,40 @@ class Assembler : public AssemblerBase { OperandSize sz, Condition cond); + void Load(Register reg, + const Address& address, + OperandSize type, + Condition cond); + void Load(Register reg, + const Address& address, + OperandSize type = kFourBytes) override { + Load(reg, address, type, AL); + } void LoadFromOffset(Register reg, - const Address& address, - OperandSize type, - Condition cond); - void LoadFromOffset(Register reg, - const Address& address, + Register base, + int32_t offset, OperandSize type = kFourBytes) override { - LoadFromOffset(reg, address, type, AL); + LoadFromOffset(reg, base, offset, type, AL); } void LoadFromOffset(Register reg, Register base, int32_t offset, - OperandSize type = kFourBytes, - Condition cond = AL) { - LoadFromOffset(reg, Address(base, offset), type, cond); + OperandSize type, + Condition cond) { + Load(reg, Address(base, offset), type, cond); } void LoadFieldFromOffset(Register reg, Register base, int32_t offset, - OperandSize sz = kFourBytes) override { - LoadFromOffset(reg, FieldAddress(base, offset), sz, AL); + OperandSize type = kFourBytes) override { + LoadFieldFromOffset(reg, base, offset, type, AL); } void LoadFieldFromOffset(Register reg, Register base, int32_t offset, OperandSize type, Condition cond) { - LoadFromOffset(reg, FieldAddress(base, offset), type, cond); - } - void LoadCompressedFieldFromOffset(Register reg, - Register base, - int32_t offset) override { - LoadCompressedFieldFromOffset(reg, base, offset, kFourBytes, AL); - } - void LoadCompressedFieldFromOffset(Register reg, - Register base, - int32_t offset, - OperandSize type, - Condition cond = AL) { - LoadFieldFromOffset(reg, base, offset, type, cond); + Load(reg, FieldAddress(base, offset), type, cond); } // For loading indexed payloads out of tagged objects like Arrays. 
If the // payload objects are word-sized, use TIMES_HALF_WORD_SIZE if the contents of @@ -1155,47 +1137,52 @@ class Assembler : public AssemblerBase { int32_t payload_start, Register index, ScaleFactor scale, - OperandSize type = kFourBytes) { + OperandSize type = kFourBytes) override { add(dst, base, Operand(index, LSL, scale)); LoadFromOffset(dst, dst, payload_start - kHeapObjectTag, type); } - void LoadIndexedCompressed(Register dst, - Register base, - int32_t offset, - Register index) { - add(dst, base, Operand(index, LSL, TIMES_COMPRESSED_WORD_SIZE)); - LoadCompressedFieldFromOffset(dst, dst, offset); - } void LoadFromStack(Register dst, intptr_t depth); void StoreToStack(Register src, intptr_t depth); void CompareToStack(Register src, intptr_t depth); + void Store(Register reg, + const Address& address, + OperandSize type, + Condition cond); + void Store(Register reg, + const Address& address, + OperandSize type = kFourBytes) override { + Store(reg, address, type, AL); + } void StoreToOffset(Register reg, - const Address& address, - OperandSize type, - Condition cond); - void StoreToOffset(Register reg, - const Address& address, + Register base, + int32_t offset, OperandSize type = kFourBytes) override { - StoreToOffset(reg, address, type, AL); + StoreToOffset(reg, base, offset, type, AL); } void StoreToOffset(Register reg, Register base, int32_t offset, - OperandSize type = kFourBytes, - Condition cond = AL) { - StoreToOffset(reg, Address(base, offset), type, cond); + OperandSize type, + Condition cond) { + Store(reg, Address(base, offset), type, cond); } void StoreFieldToOffset(Register reg, Register base, int32_t offset, - OperandSize type = kFourBytes, - Condition cond = AL) { - StoreToOffset(reg, FieldAddress(base, offset), type, cond); + OperandSize type = kFourBytes) override { + StoreFieldToOffset(reg, base, offset, type, AL); + } + void StoreFieldToOffset(Register reg, + Register base, + int32_t offset, + OperandSize type, + Condition cond) { + Store(reg, FieldAddress(base, offset), type, cond); } void StoreZero(const Address& address, Register temp) { mov(temp, Operand(0)); - StoreToOffset(temp, address); + Store(temp, address); } void LoadSFromOffset(SRegister reg, Register base, @@ -1545,16 +1532,9 @@ class Assembler : public AssemblerBase { Register field, Register scratch); - void LoadCompressedFieldAddressForRegOffset(Register address, - Register instance, - Register offset_in_words_as_smi) { - return LoadFieldAddressForRegOffset(address, instance, - offset_in_words_as_smi); - } - void LoadFieldAddressForRegOffset(Register address, Register instance, - Register offset_in_words_as_smi); + Register offset_in_words_as_smi) override; void LoadFieldAddressForOffset(Register address, Register instance, diff --git a/runtime/vm/compiler/assembler/assembler_arm64.cc b/runtime/vm/compiler/assembler/assembler_arm64.cc index a9987b3301fc..c937ce58f0fb 100644 --- a/runtime/vm/compiler/assembler/assembler_arm64.cc +++ b/runtime/vm/compiler/assembler/assembler_arm64.cc @@ -942,11 +942,15 @@ Address Assembler::PrepareLargeOffset(Register base, } } -void Assembler::LoadFromOffset(Register dest, - const Address& addr, - OperandSize sz) { - ldr(dest, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()), - sz); +void Assembler::Load(Register dst, const Address& addr, OperandSize sz) { + if (addr.type() == Address::AddressType::Offset || + addr.type() == Address::AddressType::PairOffset) { + ldr(dst, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()), + sz); + } 
else { + // Pass the address through unchanged. + ldr(dst, addr, sz); + } } void Assembler::LoadSFromOffset(VRegister dest, Register base, int32_t offset) { @@ -964,10 +968,15 @@ void Assembler::LoadQFromOffset(VRegister dest, Register base, int32_t offset) { fldrq(dest, PrepareLargeOffset(base, offset, kQWord, type)); } -void Assembler::StoreToOffset(Register src, - const Address& addr, - OperandSize sz) { - str(src, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()), sz); +void Assembler::Store(Register src, const Address& addr, OperandSize sz) { + if (addr.type() == Address::AddressType::Offset || + addr.type() == Address::AddressType::PairOffset) { + str(src, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()), + sz); + } else { + // Pass the address through unchanged. + str(src, addr, sz); + } } void Assembler::StorePairToOffset(Register low, @@ -1024,55 +1033,19 @@ void Assembler::VRSqrts(VRegister vd, VRegister vn) { vmuls(vd, vd, VTMP); } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::LoadCompressed(Register dest, const Address& slot) { -#if !defined(DART_COMPRESSED_POINTERS) - ldr(dest, slot); -#else ldr(dest, slot, kUnsignedFourBytes); // Zero-extension. add(dest, dest, Operand(HEAP_BITS, LSL, 32)); -#endif } void Assembler::LoadCompressedFromOffset(Register dest, Register base, int32_t offset) { -#if !defined(DART_COMPRESSED_POINTERS) - LoadFromOffset(dest, base, offset, kObjectBytes); -#else LoadFromOffset(dest, base, offset, kUnsignedFourBytes); // Zero-extension. add(dest, dest, Operand(HEAP_BITS, LSL, 32)); -#endif } - -void Assembler::LoadCompressedSmi(Register dest, const Address& slot) { -#if !defined(DART_COMPRESSED_POINTERS) - ldr(dest, slot); -#else - ldr(dest, slot, kUnsignedFourBytes); // Zero-extension. -#endif -#if defined(DEBUG) - Label done; - BranchIfSmi(dest, &done, kNearJump); - Stop("Expected Smi"); - Bind(&done); -#endif -} - -void Assembler::LoadCompressedSmiFromOffset(Register dest, - Register base, - int32_t offset) { -#if !defined(DART_COMPRESSED_POINTERS) - LoadFromOffset(dest, base, offset); -#else - LoadFromOffset(dest, base, offset, kUnsignedFourBytes); // Zero-extension. 
-#endif -#if defined(DEBUG) - Label done; - BranchIfSmi(dest, &done); - Stop("Expected Smi"); - Bind(&done); #endif -} void Assembler::StoreIntoObjectOffset(Register object, int32_t offset, @@ -1087,6 +1060,7 @@ void Assembler::StoreIntoObjectOffset(Register object, StoreBarrier(object, value, value_can_be_smi); } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoObjectOffset(Register object, int32_t offset, Register value, @@ -1099,6 +1073,7 @@ void Assembler::StoreCompressedIntoObjectOffset(Register object, } StoreBarrier(object, value, value_can_be_smi); } +#endif void Assembler::StoreIntoObject(Register object, const Address& dest, @@ -1111,6 +1086,7 @@ void Assembler::StoreIntoObject(Register object, StoreBarrier(object, value, can_be_smi); } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoObject(Register object, const Address& dest, Register value, @@ -1121,6 +1097,7 @@ void Assembler::StoreCompressedIntoObject(Register object, str(value, dest, kObjectBytes); StoreBarrier(object, value, can_be_smi); } +#endif void Assembler::StoreBarrier(Register object, Register value, @@ -1195,6 +1172,7 @@ void Assembler::StoreIntoArray(Register object, StoreIntoArrayBarrier(object, slot, value, can_be_smi); } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoArray(Register object, Register slot, Register value, @@ -1202,6 +1180,7 @@ void Assembler::StoreCompressedIntoArray(Register object, str(value, Address(slot, 0), kObjectBytes); StoreIntoArrayBarrier(object, slot, value, can_be_smi); } +#endif void Assembler::StoreIntoArrayBarrier(Register object, Register slot, @@ -1274,6 +1253,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object, // No store buffer update. } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoObjectNoBarrier(Register object, const Address& dest, Register value, @@ -1298,6 +1278,7 @@ void Assembler::StoreCompressedIntoObjectNoBarrier(Register object, #endif // defined(DEBUG) // No store buffer update. 
} +#endif void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, int32_t offset, @@ -1313,6 +1294,7 @@ void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, } } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoObjectOffsetNoBarrier( Register object, int32_t offset, @@ -1328,6 +1310,7 @@ void Assembler::StoreCompressedIntoObjectOffsetNoBarrier( StoreCompressedIntoObjectNoBarrier(object, Address(TMP), value); } } +#endif void Assembler::StoreIntoObjectNoBarrier(Register object, const Address& dest, @@ -1346,6 +1329,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object, } } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoObjectNoBarrier(Register object, const Address& dest, const Object& value, @@ -1364,6 +1348,7 @@ void Assembler::StoreCompressedIntoObjectNoBarrier(Register object, str(TMP2, dest, kObjectBytes); } } +#endif void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, int32_t offset, @@ -1387,6 +1372,7 @@ void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, } } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoObjectOffsetNoBarrier( Register object, int32_t offset, @@ -1411,6 +1397,7 @@ void Assembler::StoreCompressedIntoObjectOffsetNoBarrier( StoreCompressedIntoObjectNoBarrier(object, Address(TMP), value); } } +#endif void Assembler::StoreInternalPointer(Register object, const Address& dest, @@ -2128,8 +2115,7 @@ void Assembler::TryAllocateObject(intptr_t cid, const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size); LoadImmediate(temp_reg, tags); - StoreToOffset(temp_reg, - FieldAddress(instance_reg, target::Object::tags_offset())); + Store(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset())); } else { b(failure); } @@ -2355,6 +2341,7 @@ void Assembler::LoadStaticFieldAddress(Register address, Operand(scratch, LSL, target::kWordSizeLog2 - kSmiTagShift)); } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::LoadCompressedFieldAddressForRegOffset( Register address, Register instance, @@ -2364,6 +2351,7 @@ void Assembler::LoadCompressedFieldAddressForRegOffset( target::kCompressedWordSizeLog2 - kSmiTagShift)); AddImmediate(address, -kHeapObjectTag); } +#endif void Assembler::LoadFieldAddressForRegOffset(Register address, Register instance, diff --git a/runtime/vm/compiler/assembler/assembler_arm64.h b/runtime/vm/compiler/assembler/assembler_arm64.h index 728d5689d1df..fc178857e4a1 100644 --- a/runtime/vm/compiler/assembler/assembler_arm64.h +++ b/runtime/vm/compiler/assembler/assembler_arm64.h @@ -500,7 +500,7 @@ class Assembler : public AssemblerBase { } } - void Bind(Label* label); + void Bind(Label* label) override; // Unconditional jump to a given label. [distance] is ignored on ARM. void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); } // Unconditional jump to a given address in register. 
@@ -511,12 +511,6 @@ class Assembler : public AssemblerBase { br(TMP); } - void LoadField(Register dst, const FieldAddress& address) override { - LoadFromOffset(dst, address); - } - void LoadCompressedField(Register dst, const FieldAddress& address) override { - LoadCompressed(dst, address); - } void LoadMemoryValue(Register dst, Register base, int32_t offset) { LoadFromOffset(dst, base, offset, kEightBytes); } @@ -544,14 +538,14 @@ class Assembler : public AssemblerBase { #endif } +#if defined(DART_COMPRESSED_POINTERS) void LoadAcquireCompressed(Register dst, Register address, int32_t offset = 0) override { LoadAcquire(dst, address, offset, kObjectBytes); -#if defined(DART_COMPRESSED_POINTERS) add(dst, dst, Operand(HEAP_BITS, LSL, 32)); -#endif } +#endif void StoreRelease(Register src, Register address, @@ -567,9 +561,10 @@ class Assembler : public AssemblerBase { #endif } +#if defined(DART_COMPRESSED_POINTERS) void StoreReleaseCompressed(Register src, Register address, - int32_t offset = 0) { + int32_t offset = 0) override { Register kResultReg = address; if (offset != 0) { kResultReg = TMP; @@ -580,18 +575,12 @@ class Assembler : public AssemblerBase { TsanStoreRelease(kResultReg); #endif } - - void CompareWithCompressedFieldFromOffset(Register value, - Register base, - int32_t offset) { - LoadCompressedFieldFromOffset(TMP, base, offset); - cmp(value, Operand(TMP)); - } +#endif void CompareWithMemoryValue(Register value, Address address, OperandSize sz = kEightBytes) override { - LoadFromOffset(TMP, address, sz); + Load(TMP, address, sz); cmp(value, Operand(TMP), sz); } @@ -1880,31 +1869,9 @@ class Assembler : public AssemblerBase { int32_t offset, OperandSize sz, Address::AddressType addr_type); - void LoadFromOffset(Register dest, - const Address& address, - OperandSize sz = kEightBytes) override; - void LoadFromOffset(Register dest, - Register base, - int32_t offset, - OperandSize sz = kEightBytes) { - LoadFromOffset(dest, Address(base, offset), sz); - } - void LoadFieldFromOffset(Register dest, - Register base, - int32_t offset, - OperandSize sz = kEightBytes) override { - LoadFromOffset(dest, FieldAddress(base, offset), sz); - } - void LoadCompressedFieldFromOffset(Register dest, - Register base, - int32_t offset) override { - LoadCompressedFromOffset(dest, base, offset - kHeapObjectTag); - } - void LoadCompressedSmiFieldFromOffset(Register dest, - Register base, - int32_t offset) { - LoadCompressedSmiFromOffset(dest, base, offset - kHeapObjectTag); - } + void Load(Register dest, + const Address& address, + OperandSize sz = kEightBytes) override; // For loading indexed payloads out of tagged objects like Arrays. If the // payload objects are word-sized, use TIMES_HALF_WORD_SIZE if the contents of // [index] is a Smi, otherwise TIMES_WORD_SIZE if unboxed. 
@@ -1913,17 +1880,19 @@ class Assembler : public AssemblerBase { int32_t payload_offset, Register index, ScaleFactor scale, - OperandSize sz = kEightBytes) { + OperandSize sz = kEightBytes) override { add(dest, base, Operand(index, LSL, scale)); LoadFromOffset(dest, dest, payload_offset - kHeapObjectTag, sz); } +#if defined(DART_COMPRESSED_POINTERS) void LoadIndexedCompressed(Register dest, Register base, int32_t offset, - Register index) { + Register index) override { add(dest, base, Operand(index, LSL, TIMES_COMPRESSED_WORD_SIZE)); LoadCompressedFieldFromOffset(dest, dest, offset); } +#endif void LoadSFromOffset(VRegister dest, Register base, int32_t offset); void LoadDFromOffset(VRegister dest, Register base, int32_t offset); void LoadDFieldFromOffset(VRegister dest, Register base, int32_t offset) { @@ -1938,23 +1907,11 @@ class Assembler : public AssemblerBase { void StoreToStack(Register src, intptr_t depth); void CompareToStack(Register src, intptr_t depth); - void StoreToOffset(Register src, - const Address& address, - OperandSize sz = kEightBytes) override; - void StoreToOffset(Register src, - Register base, - int32_t offset, - OperandSize sz = kEightBytes) { - StoreToOffset(src, Address(base, offset), sz); - } - void StoreFieldToOffset(Register src, - Register base, - int32_t offset, - OperandSize sz = kEightBytes) { - StoreToOffset(src, FieldAddress(base, offset), sz); - } + void Store(Register src, + const Address& address, + OperandSize sz = kEightBytes) override; void StoreZero(const Address& address, Register temp = kNoRegister) { - StoreToOffset(ZR, address); + Store(ZR, address); } void StorePairToOffset(Register low, @@ -1997,12 +1954,12 @@ class Assembler : public AssemblerBase { } } - void LoadCompressed(Register dest, const Address& slot); - void LoadCompressedFromOffset(Register dest, Register base, int32_t offset); - void LoadCompressedSmi(Register dest, const Address& slot) override; - void LoadCompressedSmiFromOffset(Register dest, - Register base, - int32_t offset); +#if defined(DART_COMPRESSED_POINTERS) + void LoadCompressed(Register dest, const Address& slot) override; + void LoadCompressedFromOffset(Register dest, + Register base, + int32_t offset) override; +#endif // Store into a heap object and apply the generational and incremental write // barriers. 
All stores into heap objects must pass through this function or, @@ -2014,76 +1971,93 @@ class Assembler : public AssemblerBase { Register value, CanBeSmi can_value_be_smi = kValueCanBeSmi, MemoryOrder memory_order = kRelaxedNonAtomic) override; +#if defined(DART_COMPRESSED_POINTERS) void StoreCompressedIntoObject( Register object, const Address& dest, Register value, CanBeSmi can_value_be_smi = kValueCanBeSmi, MemoryOrder memory_order = kRelaxedNonAtomic) override; +#endif void StoreBarrier(Register object, Register value, CanBeSmi can_value_be_smi); void StoreIntoArray(Register object, Register slot, Register value, - CanBeSmi can_value_be_smi = kValueCanBeSmi); - void StoreCompressedIntoArray(Register object, - Register slot, - Register value, - CanBeSmi can_value_be_smi = kValueCanBeSmi); + CanBeSmi can_value_be_smi = kValueCanBeSmi) override; +#if defined(DART_COMPRESSED_POINTERS) + void StoreCompressedIntoArray( + Register object, + Register slot, + Register value, + CanBeSmi can_value_be_smi = kValueCanBeSmi) override; +#endif void StoreIntoArrayBarrier(Register object, Register slot, Register value, CanBeSmi can_value_be_smi); - void StoreIntoObjectOffset(Register object, - int32_t offset, - Register value, - CanBeSmi can_value_be_smi = kValueCanBeSmi, - MemoryOrder memory_order = kRelaxedNonAtomic); + void StoreIntoObjectOffset( + Register object, + int32_t offset, + Register value, + CanBeSmi can_value_be_smi = kValueCanBeSmi, + MemoryOrder memory_order = kRelaxedNonAtomic) override; +#if defined(DART_COMPRESSED_POINTERS) void StoreCompressedIntoObjectOffset( Register object, int32_t offset, Register value, CanBeSmi can_value_be_smi = kValueCanBeSmi, - MemoryOrder memory_order = kRelaxedNonAtomic); + MemoryOrder memory_order = kRelaxedNonAtomic) override; +#endif void StoreIntoObjectNoBarrier( Register object, const Address& dest, Register value, MemoryOrder memory_order = kRelaxedNonAtomic) override; +#if defined(DART_COMPRESSED_POINTERS) void StoreCompressedIntoObjectNoBarrier( Register object, const Address& dest, Register value, MemoryOrder memory_order = kRelaxedNonAtomic) override; +#endif void StoreIntoObjectOffsetNoBarrier( Register object, int32_t offset, Register value, - MemoryOrder memory_order = kRelaxedNonAtomic); + MemoryOrder memory_order = kRelaxedNonAtomic) override; +#if defined(DART_COMPRESSED_POINTERS) void StoreCompressedIntoObjectOffsetNoBarrier( Register object, int32_t offset, Register value, - MemoryOrder memory_order = kRelaxedNonAtomic); - void StoreIntoObjectNoBarrier(Register object, - const Address& dest, - const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); + MemoryOrder memory_order = kRelaxedNonAtomic) override; +#endif + void StoreIntoObjectNoBarrier( + Register object, + const Address& dest, + const Object& value, + MemoryOrder memory_order = kRelaxedNonAtomic) override; +#if defined(DART_COMPRESSED_POINTERS) void StoreCompressedIntoObjectNoBarrier( Register object, const Address& dest, const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); + MemoryOrder memory_order = kRelaxedNonAtomic) override; +#endif void StoreIntoObjectOffsetNoBarrier( Register object, int32_t offset, const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); + MemoryOrder memory_order = kRelaxedNonAtomic) override; +#if defined(DART_COMPRESSED_POINTERS) void StoreCompressedIntoObjectOffsetNoBarrier( Register object, int32_t offset, const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); + MemoryOrder memory_order 
= kRelaxedNonAtomic) override; +#endif // Stores a non-tagged value into a heap object. void StoreInternalPointer(Register object, const Address& dest, @@ -2371,13 +2345,16 @@ class Assembler : public AssemblerBase { Register field, Register scratch); - void LoadCompressedFieldAddressForRegOffset(Register address, - Register instance, - Register offset_in_words_as_smi); +#if defined(DART_COMPRESSED_POINTERS) + void LoadCompressedFieldAddressForRegOffset( + Register address, + Register instance, + Register offset_in_words_as_smi) override; +#endif void LoadFieldAddressForRegOffset(Register address, Register instance, - Register offset_in_words_as_smi); + Register offset_in_words_as_smi) override; void LoadFieldAddressForOffset(Register address, Register instance, diff --git a/runtime/vm/compiler/assembler/assembler_base.cc b/runtime/vm/compiler/assembler/assembler_base.cc index 083f58a5306a..5ad969f64fff 100644 --- a/runtime/vm/compiler/assembler/assembler_base.cc +++ b/runtime/vm/compiler/assembler/assembler_base.cc @@ -32,54 +32,159 @@ AssemblerBase::~AssemblerBase() {} void AssemblerBase::LoadFromSlot(Register dst, Register base, const Slot& slot) { - auto const rep = slot.representation(); - const FieldAddress address(base, slot.offset_in_bytes()); - if (rep != kTagged) { - auto const sz = RepresentationUtils::OperandSize(rep); - return LoadFromOffset(dst, address, sz); + if (slot.is_unboxed()) { + // The result cannot be a floating point or SIMD value. + ASSERT(slot.representation() == kUntagged || + RepresentationUtils::IsUnboxedInteger(slot.representation())); + // Since we only have a single destination register, the result value must + // fit into a register. + ASSERT(RepresentationUtils::ValueSize(slot.representation()) <= + compiler::target::kWordSize); + const intptr_t offset = slot.offset_in_bytes() - kHeapObjectTag; + auto const sz = RepresentationUtils::OperandSize(slot.representation()); + return LoadFromOffset(dst, base, offset, sz); } - if (slot.is_compressed()) { - if (slot.type().ToCid() == kSmiCid) { - return LoadCompressedSmi(dst, address); - } else { - return LoadCompressedField(dst, address); - } + if (!slot.is_compressed()) { + LoadFieldFromOffset(dst, base, slot.offset_in_bytes()); + } else if (slot.type().ToCid() == kSmiCid) { + LoadCompressedSmiFieldFromOffset(dst, base, slot.offset_in_bytes()); + } else { + LoadCompressedFieldFromOffset(dst, base, slot.offset_in_bytes()); } - return LoadField(dst, address); } -void AssemblerBase::StoreToSlot(Register src, Register base, const Slot& slot) { - auto const rep = slot.representation(); - const FieldAddress address(base, slot.offset_in_bytes()); - if (rep != kTagged) { - auto const sz = RepresentationUtils::OperandSize(rep); - return StoreToOffset(src, address, sz); +void AssemblerBase::StoreToSlot(Register src, + Register base, + const Slot& slot, + MemoryOrder memory_order) { + auto const can_be_smi = + slot.type().CanBeSmi() ? kValueCanBeSmi : kValueIsNotSmi; + StoreToSlot(src, base, slot, can_be_smi, memory_order); +} + +void AssemblerBase::StoreToSlot(Register src, + Register base, + const Slot& slot, + CanBeSmi can_be_smi, + MemoryOrder memory_order) { + if (slot.is_unboxed()) { + // Same as the no barrier case. + return StoreToSlotNoBarrier(src, base, slot, memory_order); } if (slot.is_compressed()) { - return StoreCompressedIntoObject( base, address, src, - slot.type().CanBeSmi() ?
kValueCanBeSmi : kValueIsNotSmi); + StoreCompressedIntoObjectOffset(base, slot.offset_in_bytes(), src, + can_be_smi, memory_order); + } else { + StoreIntoObjectOffset(base, slot.offset_in_bytes(), src, can_be_smi, + memory_order); } - return StoreIntoObject( - base, address, src, - slot.type().CanBeSmi() ? kValueCanBeSmi : kValueIsNotSmi); } void AssemblerBase::StoreToSlotNoBarrier(Register src, Register base, - const Slot& slot) { - auto const rep = slot.representation(); - const FieldAddress address(base, slot.offset_in_bytes()); - if (rep != kTagged) { - auto const sz = RepresentationUtils::OperandSize(rep); - return StoreToOffset(src, address, sz); + const Slot& slot, + MemoryOrder memory_order) { + if (slot.is_unboxed()) { + // The stored value cannot be a floating point or SIMD value. + ASSERT(slot.representation() == kUntagged || + RepresentationUtils::IsUnboxedInteger(slot.representation())); + // Since we only have a single source register, the stored value must + // fit into a register. + ASSERT(RepresentationUtils::ValueSize(slot.representation()) <= + compiler::target::kWordSize); + const intptr_t offset = slot.offset_in_bytes() - kHeapObjectTag; + auto const sz = RepresentationUtils::OperandSize(slot.representation()); + return StoreToOffset(src, base, offset, sz); } if (slot.is_compressed()) { - return StoreCompressedIntoObjectNoBarrier(base, address, src); + StoreCompressedIntoObjectOffsetNoBarrier(base, slot.offset_in_bytes(), src, + memory_order); + } else { + StoreIntoObjectOffsetNoBarrier(base, slot.offset_in_bytes(), src, + memory_order); } - return StoreIntoObjectNoBarrier(base, address, src); } +void AssemblerBase::LoadFromOffset(Register dst, + Register base, + int32_t offset, + OperandSize sz) { + Load(dst, Address(base, offset), sz); +} + +void AssemblerBase::StoreToOffset(Register src, + Register base, + int32_t offset, + OperandSize sz) { + Store(src, Address(base, offset), sz); +} + +void AssemblerBase::LoadField(Register dst, + const FieldAddress& address, + OperandSize sz) { + Load(dst, address, sz); +} + +void AssemblerBase::LoadCompressedField(Register dst, + const FieldAddress& address) { + LoadCompressed(dst, address); +} + +void AssemblerBase::StoreIntoObjectOffset(Register object, + int32_t offset, + Register value, + CanBeSmi can_be_smi, + MemoryOrder memory_order) { + StoreIntoObject(object, FieldAddress(object, offset), value, can_be_smi, + memory_order); +} +void AssemblerBase::StoreIntoObjectOffsetNoBarrier(Register object, + int32_t offset, + Register value, + MemoryOrder memory_order) { + StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value, + memory_order); +} +void AssemblerBase::StoreIntoObjectOffsetNoBarrier(Register object, + int32_t offset, + const Object& value, + MemoryOrder memory_order) { + StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value, + memory_order); +} + +#if defined(DART_COMPRESSED_POINTERS) +void AssemblerBase::LoadCompressedFromOffset(Register dst, + Register base, + int32_t offset) { + LoadCompressed(dst, Address(base, offset)); +} +void AssemblerBase::StoreCompressedIntoObjectOffset(Register object, + int32_t offset, + Register value, + CanBeSmi can_be_smi, + MemoryOrder memory_order) { + StoreCompressedIntoObject(object, FieldAddress(object, offset), value, + can_be_smi, memory_order); +} +void AssemblerBase::StoreCompressedIntoObjectOffsetNoBarrier( + Register object, + int32_t offset, + Register value, + MemoryOrder memory_order) { + StoreCompressedIntoObjectNoBarrier(object, 
FieldAddress(object, offset), + value, memory_order); +} +void AssemblerBase::StoreCompressedIntoObjectOffsetNoBarrier( + Register object, + int32_t offset, + const Object& value, + MemoryOrder memory_order) { + StoreCompressedIntoObjectNoBarrier(object, FieldAddress(object, offset), + value, memory_order); +} +#endif + void AssemblerBase::UnrolledMemCopy(Register dst_base, intptr_t dst_offset, Register src_base, @@ -89,28 +194,24 @@ void AssemblerBase::UnrolledMemCopy(Register dst_base, intptr_t offset = 0; if (target::kWordSize >= 8) { while (offset + 8 <= size) { - LoadFromOffset(temp, Address(src_base, src_offset + offset), kEightBytes); - StoreToOffset(temp, Address(dst_base, dst_offset + offset), kEightBytes); + LoadFromOffset(temp, src_base, src_offset + offset, kEightBytes); + StoreToOffset(temp, dst_base, dst_offset + offset, kEightBytes); offset += 8; } } while (offset + 4 <= size) { - LoadFromOffset(temp, Address(src_base, src_offset + offset), - kUnsignedFourBytes); - StoreToOffset(temp, Address(dst_base, dst_offset + offset), - kUnsignedFourBytes); + LoadFromOffset(temp, src_base, src_offset + offset, kUnsignedFourBytes); + StoreToOffset(temp, dst_base, dst_offset + offset, kUnsignedFourBytes); offset += 4; } while (offset + 2 <= size) { - LoadFromOffset(temp, Address(src_base, src_offset + offset), - kUnsignedTwoBytes); - StoreToOffset(temp, Address(dst_base, dst_offset + offset), - kUnsignedTwoBytes); + LoadFromOffset(temp, src_base, src_offset + offset, kUnsignedTwoBytes); + StoreToOffset(temp, dst_base, dst_offset + offset, kUnsignedTwoBytes); offset += 2; } while (offset + 1 <= size) { - LoadFromOffset(temp, Address(src_base, src_offset + offset), kUnsignedByte); - StoreToOffset(temp, Address(dst_base, dst_offset + offset), kUnsignedByte); + LoadFromOffset(temp, src_base, src_offset + offset, kUnsignedByte); + StoreToOffset(temp, dst_base, dst_offset + offset, kUnsignedByte); offset += 1; } ASSERT(offset == size); diff --git a/runtime/vm/compiler/assembler/assembler_base.h b/runtime/vm/compiler/assembler/assembler_base.h index 8a282dbff9ef..a8cc3151e5c6 100644 --- a/runtime/vm/compiler/assembler/assembler_base.h +++ b/runtime/vm/compiler/assembler/assembler_base.h @@ -629,6 +629,8 @@ class AssemblerBase : public StackResource { virtual void SmiTag(Register r) = 0; + virtual void Bind(Label* label) = 0; + // If Smis are compressed and the Smi value in dst is non-negative, ensures // the upper bits are cleared. If Smis are not compressed, is a no-op. // @@ -704,14 +706,6 @@ class AssemblerBase : public StackResource { instance_reg, temp); } - virtual void LoadFromOffset(Register dst, - const Address& address, - OperandSize sz = kWordBytes) = 0; - // Does not use write barriers, use StoreIntoObject instead for boxed fields. 
- virtual void StoreToOffset(Register src, - const Address& address, - OperandSize sz = kWordBytes) = 0; - virtual void BranchIfSmi(Register reg, Label* label, JumpDistance distance = kFarJump) = 0; @@ -745,99 +739,360 @@ class AssemblerBase : public StackResource { kRelaxedNonAtomic, }; - virtual void LoadAcquire(Register reg, - Register address, - int32_t offset = 0, - OperandSize size = kWordBytes) = 0; - virtual void LoadFieldAddressForOffset(Register reg, Register base, int32_t offset) = 0; + virtual void LoadFieldAddressForRegOffset( + Register address, + Register instance, + Register offset_in_words_as_smi) = 0; - virtual void LoadField(Register dst, const FieldAddress& address) = 0; - virtual void LoadFieldFromOffset(Register reg, - Register base, - int32_t offset, - OperandSize = kWordBytes) = 0; - void LoadFromSlot(Register dst, Register base, const Slot& slot); + virtual void LoadAcquire(Register reg, + Register address, + int32_t offset = 0, + OperandSize size = kWordBytes) = 0; + virtual void StoreRelease(Register src, + Register address, + int32_t offset = 0) = 0; + virtual void Load(Register dst, + const Address& address, + OperandSize sz = kWordBytes) = 0; + // Does not use write barriers, use StoreIntoObject instead for boxed fields. + virtual void Store(Register src, + const Address& address, + OperandSize sz = kWordBytes) = 0; virtual void StoreIntoObject( - Register object, // Object we are storing into. - const Address& dest, // Where we are storing into. - Register value, // Value we are storing. + Register object, // Object being stored into. + const Address& address, // Offset into object. + Register value, // Value being stored. CanBeSmi can_be_smi = kValueCanBeSmi, MemoryOrder memory_order = kRelaxedNonAtomic) = 0; virtual void StoreIntoObjectNoBarrier( - Register object, // Object we are storing into. - const Address& dest, // Where we are storing into. - Register value, // Value we are storing. + Register object, // Object being stored into. + const Address& address, // Offset into object. + Register value, // Value being stored. + MemoryOrder memory_order = kRelaxedNonAtomic) = 0; + virtual void StoreIntoObjectNoBarrier( + Register object, // Object being stored into. + const Address& address, // Offset into object. + const Object& value, // Value being stored. MemoryOrder memory_order = kRelaxedNonAtomic) = 0; - // For native unboxed slots, both methods are the same, as no write barrier - // is needed. - void StoreToSlot(Register src, Register base, const Slot& slot); - void StoreToSlotNoBarrier(Register src, Register base, const Slot& slot); - // Loads a Smi, handling sign extension appropriately when compressed. - // In DEBUG mode, also checks that the loaded value is a Smi and halts if not. - virtual void LoadCompressedSmi(Register dst, const Address& slot) = 0; + virtual void LoadIndexedPayload(Register dst, + Register base, + int32_t offset, + Register index, + ScaleFactor scale, + OperandSize sz = kWordBytes) = 0; + virtual void StoreIntoArray(Register object, + Register slot, + Register value, + CanBeSmi can_value_be_smi = kValueCanBeSmi) = 0; + + // For virtual XFromOffset methods, the base method implementation creates an + // appropriate address from the base register and offset and calls the + // corresponding address-taking method. 
These should be overridden for + // architectures where offsets should not be converted to addresses without + // additional precautions, or for when the ARM-specific Assembler needs + // to override with an overloaded version for the Condition argument. + + virtual void LoadFromOffset(Register dst, + Register base, + int32_t offset, + OperandSize sz = kWordBytes); + // Does not use write barriers, use StoreIntoObject instead for boxed fields. + virtual void StoreToOffset(Register src, + Register base, + int32_t offset, + OperandSize sz = kWordBytes); + + virtual void StoreIntoObjectOffset( + Register object, // Object being stored into. + int32_t offset, // Offset into object. + Register value, // Value being stored. + CanBeSmi can_be_smi = kValueCanBeSmi, + MemoryOrder memory_order = kRelaxedNonAtomic); + virtual void StoreIntoObjectOffsetNoBarrier( + Register object, // Object being stored into. + int32_t offset, // Offset into object. + Register value, // Value being stored. + MemoryOrder memory_order = kRelaxedNonAtomic); + virtual void StoreIntoObjectOffsetNoBarrier( + Register object, // Object being stored into. + int32_t offset, // Offset into object. + const Object& value, // Value being stored. + MemoryOrder memory_order = kRelaxedNonAtomic); + + void LoadField(Register dst, + const FieldAddress& address, + OperandSize sz = kWordBytes); + virtual void LoadFieldFromOffset(Register dst, + Register base, + int32_t offset, + OperandSize sz = kWordBytes) { + LoadFromOffset(dst, base, offset - kHeapObjectTag, sz); + } + // Does not use write barriers, use StoreIntoObjectOffset instead for + // boxed fields. + virtual void StoreFieldToOffset(Register src, + Register base, + int32_t offset, + OperandSize sz = kWordBytes) { + StoreToOffset(src, base, offset - kHeapObjectTag, sz); + } + + // Loads a Smi. In DEBUG mode, also checks that the loaded value is a Smi and + // halts if not. + void LoadSmi(Register dst, const Address& address) { + Load(dst, address); + DEBUG_ONLY(VerifySmi(dst)); + } + + // Loads a Smi. In DEBUG mode, also checks that the loaded value is a Smi and + // halts if not. + void LoadSmiFromOffset(Register dst, Register base, int32_t offset) { + LoadFromOffset(dst, base, offset); + DEBUG_ONLY(VerifySmi(dst)); + } - // Install pure virtual methods if using compressed pointers, to ensure that - // these methods are overridden. If there are no compressed pointers, forward - // to the uncompressed version. #if defined(DART_COMPRESSED_POINTERS) + // Add pure virtual methods for methods that take addresses. + // + // Since the methods are only virtual when using compressed pointers, the + // overriding definitions must be guarded by the appropriate #ifdef. + + virtual void LoadCompressedFieldAddressForRegOffset( + Register address, + Register instance, + Register offset_in_words_as_smi) = 0; + virtual void LoadAcquireCompressed(Register dst, Register address, int32_t offset = 0) = 0; - virtual void LoadCompressedField(Register dst, - const FieldAddress& address) = 0; - virtual void LoadCompressedFieldFromOffset(Register dst, - Register base, - int32_t offset) = 0; + virtual void StoreReleaseCompressed(Register src, + Register address, + int32_t offset = 0) = 0; + + virtual void LoadCompressed(Register dst, const Address& address) = 0; + + // There is no StoreCompressed because only Dart objects contain compressed + // pointers and compressed pointers may require write barriers, so + // StoreCompressedIntoObject should be used instead. 
+ virtual void StoreCompressedIntoObject( - Register object, // Object we are storing into. - const Address& dest, // Where we are storing into. - Register value, // Value we are storing. + Register object, // Object being stored into. + const Address& address, // Address to store the value at. + Register value, // Value being stored. CanBeSmi can_be_smi = kValueCanBeSmi, MemoryOrder memory_order = kRelaxedNonAtomic) = 0; virtual void StoreCompressedIntoObjectNoBarrier( - Register object, // Object we are storing into. - const Address& dest, // Where we are storing into. - Register value, // Value we are storing. + Register object, // Object being stored into. + const Address& address, // Address to store the value at. + Register value, // Value being stored. + MemoryOrder memory_order = kRelaxedNonAtomic) = 0; + virtual void StoreCompressedIntoObjectNoBarrier( + Register object, // Object being stored into. + const Address& address, // Address to store the value at. + const Object& value, // Value being stored. MemoryOrder memory_order = kRelaxedNonAtomic) = 0; + + virtual void LoadIndexedCompressed(Register dst, + Register base, + int32_t offset, + Register index) = 0; + virtual void StoreCompressedIntoArray( + Register object, + Register slot, + Register value, + CanBeSmi can_value_be_smi = kValueCanBeSmi) = 0; + + // Add a base virtual method for methods that take offsets which convert + // the base register and offset into an address appropriately. + // + // The latter should be overridden for architectures where offsets should not + // be converted to addresses without additional precautions. + // + // Since the methods are only virtual when using compressed pointers, the + // overriding definitions must be guarded by the appropriate #ifdef. + + virtual void LoadCompressedFromOffset(Register dst, + Register base, + int32_t offset); + virtual void StoreCompressedIntoObjectOffset( + Register object, // Object being stored into. + int32_t offset, // Offset into object. + Register value, // Value being stored. + CanBeSmi can_be_smi = kValueCanBeSmi, + MemoryOrder memory_order = kRelaxedNonAtomic); + virtual void StoreCompressedIntoObjectOffsetNoBarrier( + Register object, // Object being stored into. + int32_t offset, // Offset into object. + Register value, // Value being stored. + MemoryOrder memory_order = kRelaxedNonAtomic); + virtual void StoreCompressedIntoObjectOffsetNoBarrier( + Register object, // Object being stored into. + int32_t offset, // Offset into object. + const Object& value, // Value being stored. + MemoryOrder memory_order = kRelaxedNonAtomic); + + // Since loading Smis just involves zero extension instead of adjusting the + // high bits to be heap bits, these are non-virtual. + + // Loads a Smi, handling sign extension appropriately when compressed. + // In DEBUG mode, also checks that the loaded value is a Smi and halts if not. + void LoadCompressedSmi(Register dst, const Address& address) { + Load(dst, address, kUnsignedFourBytes); // Zero extension. + DEBUG_ONLY(VerifySmi(dst);) + } + + // Loads a Smi, handling sign extension appropriately when compressed. + // In DEBUG mode, also checks that the loaded value is a Smi and halts if not. + void LoadCompressedSmiFromOffset(Register dst, + Register base, + int32_t offset) { + LoadFromOffset(dst, base, offset, kUnsignedFourBytes); // Zero extension. 
+ DEBUG_ONLY(VerifySmi(dst);) + } #else - virtual void LoadAcquireCompressed(Register dst, - Register address, - int32_t offset = 0) { + // The methods are non-virtual and forward to the uncompressed versions. + + void LoadCompressedFieldAddressForRegOffset(Register address, + Register instance, + Register offset_in_words_as_smi) { + LoadFieldAddressForRegOffset(address, instance, offset_in_words_as_smi); + } + + void LoadAcquireCompressed(Register dst, + Register address, + int32_t offset = 0) { LoadAcquire(dst, address, offset); } - virtual void LoadCompressedField(Register dst, const FieldAddress& address) { - LoadField(dst, address); + void StoreReleaseCompressed(Register src, + Register address, + int32_t offset = 0) { + StoreRelease(src, address, offset); } - virtual void LoadCompressedFieldFromOffset(Register dst, - Register base, - int32_t offset) { - LoadFieldFromOffset(dst, base, offset); + + void LoadCompressed(Register dst, const Address& address) { + Load(dst, address); } - virtual void StoreCompressedIntoObject( - Register object, // Object we are storing into. - const Address& dest, // Where we are storing into. - Register value, // Value we are storing. + + // There is no StoreCompressed because only Dart objects contain compressed + // pointers, so StoreCompressedIntoObject should be used instead. + + void StoreCompressedIntoObject( + Register object, // Object being stored into. + const Address& address, // Address to store the value at. + Register value, // Value being stored. CanBeSmi can_be_smi = kValueCanBeSmi, MemoryOrder memory_order = kRelaxedNonAtomic) { - StoreIntoObject(object, dest, value, can_be_smi); + StoreIntoObject(object, address, value, can_be_smi, memory_order); } - virtual void StoreCompressedIntoObjectNoBarrier( - Register object, // Object we are storing into. - const Address& dest, // Where we are storing into. - Register value, // Value we are storing. + void StoreCompressedIntoObjectNoBarrier( + Register object, // Object being stored into. + const Address& address, // Address to store the value at. + Register value, // Value being stored. MemoryOrder memory_order = kRelaxedNonAtomic) { - StoreIntoObjectNoBarrier(object, dest, value); + StoreIntoObjectNoBarrier(object, address, value, memory_order); + } + void StoreCompressedIntoObjectNoBarrier( + Register object, // Object being stored into. + const Address& address, // Address to store the value at. + const Object& value, // Value being stored. + MemoryOrder memory_order = kRelaxedNonAtomic) { + StoreIntoObjectNoBarrier(object, address, value, memory_order); + } + + void LoadIndexedCompressed(Register dst, + Register base, + int32_t offset, + Register index) { + LoadIndexedPayload(dst, base, offset, index, TIMES_WORD_SIZE, kWordBytes); + } + void StoreCompressedIntoArray(Register object, + Register slot, + Register value, + CanBeSmi can_value_be_smi = kValueCanBeSmi) { + StoreIntoArray(object, slot, value, can_value_be_smi); + } + + void LoadCompressedFromOffset(Register dst, Register base, int32_t offset) { + LoadFromOffset(dst, base, offset); + } + void StoreCompressedIntoObjectOffset( + Register object, // Object being stored into. + int32_t offset, // Offset into object. + Register value, // Value being stored. + CanBeSmi can_be_smi = kValueCanBeSmi, + MemoryOrder memory_order = kRelaxedNonAtomic) { + StoreIntoObjectOffset(object, offset, value, can_be_smi, memory_order); + } + void StoreCompressedIntoObjectOffsetNoBarrier( + Register object, // Object being stored into. 
+ int32_t offset, // Offset into object. + Register value, // Value being stored. + MemoryOrder memory_order = kRelaxedNonAtomic) { + StoreIntoObjectOffsetNoBarrier(object, offset, value, memory_order); + } + void StoreCompressedIntoObjectOffsetNoBarrier( + Register object, // Object being stored into. + int32_t offset, // Offset into object. + const Object& value, // Value being stored. + MemoryOrder memory_order = kRelaxedNonAtomic) { + StoreIntoObjectOffsetNoBarrier(object, offset, value, memory_order); + } + + // Loads a Smi, handling sign extension appropriately when compressed. + // In DEBUG mode, also checks that the loaded value is a Smi and halts if not. + void LoadCompressedSmi(Register dst, const Address& address) { + LoadSmi(dst, address); + } + + // Loads a Smi, handling sign extension appropriately when compressed. + // In DEBUG mode, also checks that the loaded value is a Smi and halts if not. + void LoadCompressedSmiFromOffset(Register dst, + Register base, + int32_t offset) { + LoadSmiFromOffset(dst, base, offset); } #endif // defined(DART_COMPRESSED_POINTERS) - virtual void StoreRelease(Register src, - Register address, - int32_t offset = 0) = 0; + // These methods just delegate to the non-Field methods, either passing + // along a FieldAddress as the Address or adjusting the offset appropriately. + + void LoadCompressedField(Register dst, const FieldAddress& address); + void LoadCompressedFieldFromOffset(Register dst, + Register base, + int32_t offset) { + LoadCompressedFromOffset(dst, base, offset - kHeapObjectTag); + } + void LoadCompressedSmiFieldFromOffset(Register dst, + Register base, + int32_t offset) { + LoadCompressedSmiFromOffset(dst, base, offset - kHeapObjectTag); + } + + // There are no StoreCompressedField methods because only Dart objects contain + // compressed pointers, and stores into Dart objects may require write barriers, + // so StoreCompressedIntoObject should be used instead. + + void LoadFromSlot(Register dst, Register base, const Slot& slot); + void StoreToSlot(Register src, + Register base, + const Slot& slot, + CanBeSmi can_be_smi, + MemoryOrder memory_order = kRelaxedNonAtomic); + void StoreToSlotNoBarrier(Register src, + Register base, + const Slot& slot, + MemoryOrder memory_order = kRelaxedNonAtomic); + // Uses the type information of the Slot to determine whether the field + // can be a Smi or not. + void StoreToSlot(Register src, + Register base, + const Slot& slot, + MemoryOrder memory_order = kRelaxedNonAtomic); // Truncates upper bits. virtual void LoadInt32FromBoxOrSmi(Register result, Register value) = 0; @@ -986,6 +1241,14 @@ class AssemblerBase : public StackResource { RangeCheckCondition condition, Label* target) = 0; + // Checks [dst] for a Smi, halting if it does not contain one. + void VerifySmi(Register dst) { + Label done; + BranchIfSmi(dst, &done, kNearJump); + Stop("Expected Smi"); + Bind(&done); + } + protected: AssemblerBuffer buffer_; // Contains position independent code.
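To make the dispatch above concrete, here is a rough standalone sketch of the pattern (the class name, register type, and address type are illustrative placeholders, not the SDK's): compressed accessors are pure virtual only when compression is enabled, and otherwise collapse into non-virtual forwards to the uncompressed methods.

#include <cstdint>

class AssemblerBaseSketch {
 public:
  virtual ~AssemblerBaseSketch() = default;

  // The uncompressed load is always a per-architecture primitive.
  virtual void Load(int dst, intptr_t address) = 0;

#if defined(DART_COMPRESSED_POINTERS)
  // With compression enabled, the compressed variant is also per-architecture,
  // since the high bits must be restored from the heap base after the load.
  virtual void LoadCompressed(int dst, intptr_t address) = 0;
#else
  // Without compression there is nothing to decompress, so the compressed
  // variant is a non-virtual forward resolved at compile time.
  void LoadCompressed(int dst, intptr_t address) { Load(dst, address); }
#endif
};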
int32_t prologue_offset_; diff --git a/runtime/vm/compiler/assembler/assembler_ia32.cc b/runtime/vm/compiler/assembler/assembler_ia32.cc index 01eb2095bfaf..8b62ebd3d995 100644 --- a/runtime/vm/compiler/assembler/assembler_ia32.cc +++ b/runtime/vm/compiler/assembler/assembler_ia32.cc @@ -1796,9 +1796,7 @@ void Assembler::CompareRegisters(Register a, Register b) { cmpl(a, b); } -void Assembler::LoadFromOffset(Register reg, - const Address& address, - OperandSize type) { +void Assembler::Load(Register reg, const Address& address, OperandSize type) { switch (type) { case kByte: return movsxb(reg, address); @@ -1817,9 +1815,7 @@ void Assembler::LoadFromOffset(Register reg, } } -void Assembler::StoreToOffset(Register reg, - const Address& address, - OperandSize sz) { +void Assembler::Store(Register reg, const Address& address, OperandSize sz) { switch (sz) { case kByte: case kUnsignedByte: @@ -1836,7 +1832,7 @@ void Assembler::StoreToOffset(Register reg, } } -void Assembler::StoreToOffset(const Object& object, const Address& dst) { +void Assembler::Store(const Object& object, const Address& dst) { if (target::CanEmbedAsRawPointerInGeneratedCode(object)) { movl(dst, Immediate(target::ToRawPointer(object))); } else { @@ -2080,16 +2076,6 @@ void Assembler::CompareObject(Register reg, const Object& object) { } } -void Assembler::LoadCompressedSmi(Register dest, const Address& slot) { - movl(dest, slot); -#if defined(DEBUG) - Label done; - BranchIfSmi(dest, &done, kNearJump); - Stop("Expected Smi"); - Bind(&done); -#endif -} - void Assembler::StoreIntoObject(Register object, const Address& dest, Register value, diff --git a/runtime/vm/compiler/assembler/assembler_ia32.h b/runtime/vm/compiler/assembler/assembler_ia32.h index 52ba1af1d21b..a269c0bc5736 100644 --- a/runtime/vm/compiler/assembler/assembler_ia32.h +++ b/runtime/vm/compiler/assembler/assembler_ia32.h @@ -617,64 +617,22 @@ class Assembler : public AssemblerBase { j(condition, label, distance); } - // Arch-specific LoadFromOffset to choose the right operation for [sz]. - void LoadFromOffset(Register dst, - const Address& address, - OperandSize sz = kFourBytes) override; - void LoadFromOffset(Register dst, - Register base, - int32_t offset, - OperandSize sz = kFourBytes) { - LoadFromOffset(dst, Address(base, offset), sz); - } - void LoadField(Register dst, const FieldAddress& address) override { - LoadField(dst, address, kFourBytes); - } - void LoadField(Register dst, const FieldAddress& address, OperandSize sz) { - LoadFromOffset(dst, address, sz); - } - void LoadFieldFromOffset(Register reg, - Register base, - int32_t offset, - OperandSize sz = kFourBytes) override { - LoadFromOffset(reg, FieldAddress(base, offset), sz); - } - void LoadCompressedFieldFromOffset(Register reg, - Register base, - int32_t offset) override { - LoadFieldFromOffset(reg, base, offset); - } + // Arch-specific Load to choose the right operation for [sz]. 
+ void Load(Register dst, + const Address& address, + OperandSize sz = kFourBytes) override; void LoadIndexedPayload(Register dst, Register base, int32_t payload_offset, Register index, ScaleFactor scale, - OperandSize sz = kFourBytes) { - LoadFromOffset(dst, FieldAddress(base, index, scale, payload_offset), sz); - } - void LoadIndexedCompressed(Register dst, - Register base, - int32_t offset, - Register index) { - LoadCompressedField( - dst, FieldAddress(base, index, TIMES_COMPRESSED_WORD_SIZE, offset)); - } - void StoreToOffset(Register src, - const Address& address, - OperandSize sz = kFourBytes) override; - void StoreToOffset(Register src, - Register base, - int32_t offset, - OperandSize sz = kFourBytes) { - StoreToOffset(src, Address(base, offset), sz); - } - void StoreToOffset(const Object& value, const Address& address); - void StoreFieldToOffset(Register src, - Register base, - int32_t offset, - OperandSize sz = kFourBytes) { - StoreToOffset(src, FieldAddress(base, offset), sz); + OperandSize sz = kFourBytes) override { + Load(dst, FieldAddress(base, index, scale, payload_offset), sz); } + void Store(Register src, + const Address& address, + OperandSize sz = kFourBytes) override; + void Store(const Object& value, const Address& address); void StoreZero(const Address& address, Register temp = kNoRegister) { movl(address, Immediate(0)); } @@ -718,7 +676,7 @@ class Assembler : public AssemblerBase { OperandSize size = kFourBytes) override { // On intel loads have load-acquire behavior (i.e. loads are not re-ordered // with other loads). - LoadFromOffset(dst, Address(address, offset), size); + Load(dst, Address(address, offset), size); } void StoreRelease(Register src, Register address, @@ -848,9 +806,6 @@ class Assembler : public AssemblerBase { void PushObject(const Object& object); void CompareObject(Register reg, const Object& object); - void LoadCompressed(Register dest, const Address& slot) { movl(dest, slot); } - void LoadCompressedSmi(Register dst, const Address& slot) override; - // Store into a heap object and apply the generational write barrier. (Unlike // the other architectures, this does not apply the incremental write barrier, // and so concurrent marking is not enabled for now on IA32.) All stores into @@ -874,24 +829,40 @@ class Assembler : public AssemblerBase { void StoreIntoArray(Register object, // Object we are storing into. Register slot, // Where we are storing into. Register value, // Value we are storing. - CanBeSmi can_value_be_smi = kValueCanBeSmi, - Register scratch = kNoRegister); + CanBeSmi can_value_be_smi = kValueCanBeSmi) override { + StoreIntoArray(object, slot, value, can_value_be_smi, kNoRegister); + } + void StoreIntoArray(Register object, // Object we are storing into. + Register slot, // Where we are storing into. + Register value, // Value we are storing. + CanBeSmi can_value_be_smi, + Register scratch); void StoreIntoObjectNoBarrier( Register object, const Address& dest, Register value, MemoryOrder memory_order = kRelaxedNonAtomic) override; - void StoreIntoObjectNoBarrier(Register object, - const Address& dest, - const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); + void StoreIntoObjectNoBarrier( + Register object, + const Address& dest, + const Object& value, + MemoryOrder memory_order = kRelaxedNonAtomic) override; + void StoreIntoObjectOffset( + Register object, // Object we are storing into. + int32_t offset, // Where we are storing into. + Register value, // Value we are storing. 
+ CanBeSmi can_value_be_smi = kValueCanBeSmi, + MemoryOrder memory_order = kRelaxedNonAtomic) override { + StoreIntoObjectOffset(object, offset, value, can_value_be_smi, memory_order, + kNoRegister); + } void StoreIntoObjectOffset(Register object, // Object we are storing into. int32_t offset, // Where we are storing into. Register value, // Value we are storing. - CanBeSmi can_value_be_smi = kValueCanBeSmi, - MemoryOrder memory_order = kRelaxedNonAtomic, - Register scratch = kNoRegister) { + CanBeSmi can_value_be_smi, + MemoryOrder memory_order, + Register scratch) { StoreIntoObject(object, FieldAddress(object, offset), value, can_value_be_smi, memory_order, scratch); } @@ -899,7 +870,7 @@ class Assembler : public AssemblerBase { Register object, int32_t offset, Register value, - MemoryOrder memory_order = kRelaxedNonAtomic) { + MemoryOrder memory_order = kRelaxedNonAtomic) override { StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value, memory_order); } @@ -907,7 +878,7 @@ class Assembler : public AssemblerBase { Register object, int32_t offset, const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic) { + MemoryOrder memory_order = kRelaxedNonAtomic) override { StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value, memory_order); } @@ -1042,8 +1013,8 @@ class Assembler : public AssemblerBase { void LoadStaticFieldAddress(Register address, Register field, Register scratch) { - LoadCompressedFieldFromOffset( - scratch, field, target::Field::host_offset_or_field_id_offset()); + LoadFieldFromOffset(scratch, field, + target::Field::host_offset_or_field_id_offset()); const intptr_t field_table_offset = compiler::target::Thread::field_table_values_offset(); LoadMemoryValue(address, THR, static_cast(field_table_offset)); @@ -1051,15 +1022,9 @@ class Assembler : public AssemblerBase { leal(address, Address(address, scratch, TIMES_HALF_WORD_SIZE, 0)); } - void LoadCompressedFieldAddressForRegOffset(Register address, - Register instance, - Register offset_in_words_as_smi) { - LoadFieldAddressForRegOffset(address, instance, offset_in_words_as_smi); - } - void LoadFieldAddressForRegOffset(Register address, Register instance, - Register offset_in_words_as_smi) { + Register offset_in_words_as_smi) override { static_assert(kSmiTagShift == 1, "adjust scale factor"); leal(address, FieldAddress(instance, offset_in_words_as_smi, TIMES_2, 0)); } @@ -1122,7 +1087,7 @@ class Assembler : public AssemblerBase { Label* equals) override; void Align(intptr_t alignment, intptr_t offset); - void Bind(Label* label); + void Bind(Label* label) override; void Jump(Label* label, JumpDistance distance = kFarJump) { jmp(label, distance); } diff --git a/runtime/vm/compiler/assembler/assembler_riscv.cc b/runtime/vm/compiler/assembler/assembler_riscv.cc index 9d6c2be458fb..42b3f72dbbd5 100644 --- a/runtime/vm/compiler/assembler/assembler_riscv.cc +++ b/runtime/vm/compiler/assembler/assembler_riscv.cc @@ -2567,10 +2567,6 @@ void Assembler::Jump(const Address& address) { jr(TMP2); } -void Assembler::LoadField(Register dst, const FieldAddress& address) { - lx(dst, address); -} - #if defined(TARGET_USES_THREAD_SANITIZER) void Assembler::TsanLoadAcquire(Register addr) { LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true); @@ -2602,29 +2598,11 @@ void Assembler::LoadAcquire(Register dst, #endif } -void Assembler::LoadAcquireCompressed(Register dst, - Register address, - int32_t offset) { - LoadAcquire(dst, address, offset); -} - void Assembler::StoreRelease(Register 
src, Register address, int32_t offset) { fence(HartEffects::kMemory, HartEffects::kWrite); StoreToOffset(src, address, offset); } -void Assembler::StoreReleaseCompressed(Register src, - Register address, - int32_t offset) { - UNIMPLEMENTED(); -} - -void Assembler::CompareWithCompressedFieldFromOffset(Register value, - Register base, - int32_t offset) { - UNIMPLEMENTED(); -} - void Assembler::CompareWithMemoryValue(Register value, Address address, OperandSize size) { @@ -3188,9 +3166,7 @@ Address Assembler::PrepareLargeOffset(Register base, int32_t offset) { return Address(TMP2, lo); } -void Assembler::LoadFromOffset(Register dest, - const Address& address, - OperandSize sz) { +void Assembler::Load(Register dest, const Address& address, OperandSize sz) { Address addr = PrepareLargeOffset(address.base(), address.offset()); switch (sz) { #if XLEN == 64 @@ -3228,12 +3204,6 @@ void Assembler::LoadIndexedPayload(Register dest, AddShifted(TMP, base, index, scale); LoadFromOffset(dest, TMP, payload_offset - kHeapObjectTag, sz); } -void Assembler::LoadIndexedCompressed(Register dest, - Register base, - int32_t offset, - Register index) { - LoadIndexedPayload(dest, base, offset, index, TIMES_WORD_SIZE, kObjectBytes); -} void Assembler::LoadSFromOffset(FRegister dest, Register base, int32_t offset) { flw(dest, PrepareLargeOffset(base, offset)); @@ -3253,9 +3223,7 @@ void Assembler::CompareToStack(Register src, intptr_t depth) { CompareWithMemoryValue(src, Address(SPREG, target::kWordSize * depth)); } -void Assembler::StoreToOffset(Register src, - const Address& address, - OperandSize sz) { +void Assembler::Store(Register src, const Address& address, OperandSize sz) { Address addr = PrepareLargeOffset(address.base(), address.offset()); switch (sz) { #if XLEN == 64 @@ -3296,16 +3264,9 @@ void Assembler::StoreIntoObject(Register object, MemoryOrder memory_order) { // stlr does not feature an address operand. ASSERT(memory_order == kRelaxedNonAtomic); - StoreToOffset(value, dest); + Store(value, dest); StoreBarrier(object, value, can_value_be_smi); } -void Assembler::StoreCompressedIntoObject(Register object, - const Address& dest, - Register value, - CanBeSmi can_value_be_smi, - MemoryOrder memory_order) { - StoreIntoObject(object, dest, value, can_value_be_smi, memory_order); -} void Assembler::StoreBarrier(Register object, Register value, CanBeSmi can_value_be_smi) { @@ -3371,12 +3332,6 @@ void Assembler::StoreIntoArray(Register object, sx(value, Address(slot, 0)); StoreIntoArrayBarrier(object, slot, value, can_value_be_smi); } -void Assembler::StoreCompressedIntoArray(Register object, - Register slot, - Register value, - CanBeSmi can_value_be_smi) { - StoreIntoArray(object, slot, value, can_value_be_smi); -} void Assembler::StoreIntoArrayBarrier(Register object, Register slot, Register value, @@ -3436,19 +3391,12 @@ void Assembler::StoreIntoObjectOffset(Register object, } StoreBarrier(object, value, can_value_be_smi); } -void Assembler::StoreCompressedIntoObjectOffset(Register object, - int32_t offset, - Register value, - CanBeSmi can_value_be_smi, - MemoryOrder memory_order) { - StoreIntoObjectOffset(object, offset, value, can_value_be_smi, memory_order); -} void Assembler::StoreIntoObjectNoBarrier(Register object, const Address& dest, Register value, MemoryOrder memory_order) { ASSERT(memory_order == kRelaxedNonAtomic); - StoreToOffset(value, dest); + Store(value, dest); #if defined(DEBUG) // We can't assert the incremental barrier is not needed here, only the // generational barrier. 
We sometimes omit the write barrier when 'value' is @@ -3467,12 +3415,6 @@ void Assembler::StoreIntoObjectNoBarrier(Register object, Bind(&done); #endif } -void Assembler::StoreCompressedIntoObjectNoBarrier(Register object, - const Address& dest, - Register value, - MemoryOrder memory_order) { - StoreIntoObjectNoBarrier(object, dest, value, memory_order); -} void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, int32_t offset, Register value, @@ -3500,13 +3442,6 @@ void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, Bind(&done); #endif } -void Assembler::StoreCompressedIntoObjectOffsetNoBarrier( - Register object, - int32_t offset, - Register value, - MemoryOrder memory_order) { - StoreIntoObjectOffsetNoBarrier(object, offset, value, memory_order); -} void Assembler::StoreIntoObjectNoBarrier(Register object, const Address& dest, const Object& value, @@ -3528,12 +3463,6 @@ void Assembler::StoreIntoObjectNoBarrier(Register object, } sx(value_reg, dest); } -void Assembler::StoreCompressedIntoObjectNoBarrier(Register object, - const Address& dest, - const Object& value, - MemoryOrder memory_order) { - UNIMPLEMENTED(); -} void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, int32_t offset, const Object& value, @@ -3555,13 +3484,6 @@ void Assembler::StoreIntoObjectOffsetNoBarrier(Register object, StoreIntoObjectNoBarrier(object, Address(TMP), value); } } -void Assembler::StoreCompressedIntoObjectOffsetNoBarrier( - Register object, - int32_t offset, - const Object& value, - MemoryOrder memory_order) { - UNIMPLEMENTED(); -} // Stores a non-tagged value into a heap object. void Assembler::StoreInternalPointer(Register object, @@ -4492,8 +4414,7 @@ void Assembler::TryAllocateObject(intptr_t cid, const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size); LoadImmediate(temp_reg, tags); - StoreToOffset(temp_reg, - FieldAddress(instance_reg, target::Object::tags_offset())); + Store(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset())); } else { j(failure, distance); } @@ -4654,15 +4575,6 @@ void Assembler::LoadStaticFieldAddress(Register address, add(address, address, scratch); } -void Assembler::LoadCompressedFieldAddressForRegOffset( - Register address, - Register instance, - Register offset_in_words_as_smi) { - AddShifted(address, instance, offset_in_words_as_smi, - target::kCompressedWordSizeLog2 - kSmiTagShift); - addi(address, address, -kHeapObjectTag); -} - void Assembler::LoadFieldAddressForRegOffset(Register address, Register instance, Register offset_in_words_as_smi) { diff --git a/runtime/vm/compiler/assembler/assembler_riscv.h b/runtime/vm/compiler/assembler/assembler_riscv.h index 6884ade6eb12..92b509ae663a 100644 --- a/runtime/vm/compiler/assembler/assembler_riscv.h +++ b/runtime/vm/compiler/assembler/assembler_riscv.h @@ -856,7 +856,7 @@ class Assembler : public MicroAssembler { } } - void Bind(Label* label) { MicroAssembler::Bind(label); } + void Bind(Label* label) override { MicroAssembler::Bind(label); } // Unconditional jump to a given label. void Jump(Label* label, JumpDistance distance = kFarJump) { j(label, distance); @@ -866,10 +866,6 @@ class Assembler : public MicroAssembler { // Unconditional jump to a given address in memory. Clobbers TMP. 
void Jump(const Address& address); - void LoadField(Register dst, const FieldAddress& address) override; - void LoadCompressedField(Register dst, const FieldAddress& address) override { - LoadCompressed(dst, address); - } void LoadMemoryValue(Register dst, Register base, int32_t offset) { LoadFromOffset(dst, base, offset, kWordBytes); } @@ -887,22 +883,10 @@ class Assembler : public MicroAssembler { int32_t offset = 0, OperandSize size = kWordBytes) override; - void LoadAcquireCompressed(Register dst, - Register address, - int32_t offset = 0) override; - void StoreRelease(Register src, Register address, int32_t offset = 0) override; - void StoreReleaseCompressed(Register src, - Register address, - int32_t offset = 0); - - void CompareWithCompressedFieldFromOffset(Register value, - Register base, - int32_t offset); - void CompareWithMemoryValue(Register value, Address address, OperandSize size = kWordBytes) override; @@ -1119,31 +1103,9 @@ class Assembler : public MicroAssembler { OperandSize sz = kWordBytes) override; Address PrepareLargeOffset(Register base, int32_t offset); - void LoadFromOffset(Register dest, - const Address& address, - OperandSize sz = kWordBytes) override; - void LoadFromOffset(Register dest, - Register base, - int32_t offset, - OperandSize sz = kWordBytes) { - LoadFromOffset(dest, Address(base, offset), sz); - } - void LoadFieldFromOffset(Register dest, - Register base, - int32_t offset, - OperandSize sz = kWordBytes) override { - LoadFromOffset(dest, base, offset - kHeapObjectTag, sz); - } - void LoadCompressedFieldFromOffset(Register dest, - Register base, - int32_t offset) override { - LoadCompressedFromOffset(dest, base, offset - kHeapObjectTag); - } - void LoadCompressedSmiFieldFromOffset(Register dest, - Register base, - int32_t offset) { - LoadCompressedSmiFromOffset(dest, base, offset - kHeapObjectTag); - } + void Load(Register dest, + const Address& address, + OperandSize sz = kWordBytes) override; // For loading indexed payloads out of tagged objects like Arrays. If the // payload objects are word-sized, use TIMES_HALF_WORD_SIZE if the contents of // [index] is a Smi, otherwise TIMES_WORD_SIZE if unboxed. 
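The uniform naming that the rename establishes is easiest to see in isolation; below is a rough sketch with placeholder types (not the SDK's Register or Address) of how the offset-taking wrappers sit on top of the Address-taking primitives, with the Field variants adjusting the offset for the object tag.

#include <cstdint>

struct Address {  // Placeholder for the per-architecture Address type.
  int base;
  int32_t offset;
};

struct AssemblerSketch {
  static constexpr int32_t kHeapObjectTag = 1;  // Dart heap pointers are tagged.

  // Address-taking primitives, implemented per architecture.
  void Load(int dst, const Address& addr) { /* emit an arch-specific load */ }
  void Store(int src, const Address& addr) { /* emit an arch-specific store */ }

  // Base-register-plus-offset wrappers build an Address and delegate.
  void LoadFromOffset(int dst, int base, int32_t offset) {
    Load(dst, Address{base, offset});
  }
  void StoreToOffset(int src, int base, int32_t offset) {
    Store(src, Address{base, offset});
  }

  // Field variants additionally compensate for the heap-object tag.
  void LoadFieldFromOffset(int dst, int base, int32_t offset) {
    LoadFromOffset(dst, base, offset - kHeapObjectTag);
  }
};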
@@ -1152,11 +1114,7 @@ class Assembler : public MicroAssembler { int32_t payload_offset, Register index, ScaleFactor scale, - OperandSize sz = kWordBytes); - void LoadIndexedCompressed(Register dest, - Register base, - int32_t offset, - Register index); + OperandSize sz = kWordBytes) override; void LoadSFromOffset(FRegister dest, Register base, int32_t offset); void LoadDFromOffset(FRegister dest, Register base, int32_t offset); void LoadSFieldFromOffset(FRegister dest, Register base, int32_t offset) { @@ -1170,23 +1128,11 @@ class Assembler : public MicroAssembler { void StoreToStack(Register src, intptr_t depth); void CompareToStack(Register src, intptr_t depth); - void StoreToOffset(Register src, - const Address& address, - OperandSize sz = kWordBytes) override; - void StoreToOffset(Register src, - Register base, - int32_t offset, - OperandSize sz = kWordBytes) { - StoreToOffset(src, Address(base, offset), sz); - } - void StoreFieldToOffset(Register src, - Register base, - int32_t offset, - OperandSize sz = kWordBytes) { - StoreToOffset(src, FieldAddress(base, offset), sz); - } + void Store(Register src, + const Address& address, + OperandSize sz = kWordBytes) override; void StoreZero(const Address& address, Register temp = kNoRegister) { - StoreToOffset(ZR, address); + Store(ZR, address); } void StoreSToOffset(FRegister src, Register base, int32_t offset); void StoreSFieldToOffset(FRegister src, Register base, int32_t offset) { @@ -1220,27 +1166,6 @@ class Assembler : public MicroAssembler { UNREACHABLE(); } - void LoadCompressed(Register dest, const Address& slot) { - LoadFromOffset(dest, slot); - } - void LoadCompressedFromOffset(Register dest, Register base, int32_t offset) { - LoadFromOffset(dest, base, offset); - } - void LoadCompressedSmi(Register dest, const Address& slot) override { - LoadFromOffset(dest, slot); -#if defined(DEBUG) - Label done; - BranchIfSmi(dest, &done, kNearJump); - Stop("Expected Smi"); - Bind(&done); -#endif - } - void LoadCompressedSmiFromOffset(Register dest, - Register base, - int32_t offset) { - LoadFromOffset(dest, base, offset); - } - // Store into a heap object and apply the generational and incremental write // barriers. 
All stores into heap objects must pass through this function or, // if the value can be proven either Smi or old-and-premarked, its NoBarrier @@ -1251,43 +1176,23 @@ class Assembler : public MicroAssembler { Register value, CanBeSmi can_value_be_smi = kValueCanBeSmi, MemoryOrder memory_order = kRelaxedNonAtomic) override; - void StoreCompressedIntoObject( - Register object, - const Address& dest, - Register value, - CanBeSmi can_value_be_smi = kValueCanBeSmi, - MemoryOrder memory_order = kRelaxedNonAtomic) override; void StoreBarrier(Register object, Register value, CanBeSmi can_value_be_smi); void StoreIntoArray(Register object, Register slot, Register value, - CanBeSmi can_value_be_smi = kValueCanBeSmi); - void StoreCompressedIntoArray(Register object, - Register slot, - Register value, - CanBeSmi can_value_be_smi = kValueCanBeSmi); + CanBeSmi can_value_be_smi = kValueCanBeSmi) override; void StoreIntoArrayBarrier(Register object, Register slot, Register value, CanBeSmi can_value_be_smi); - void StoreIntoObjectOffset(Register object, - int32_t offset, - Register value, - CanBeSmi can_value_be_smi = kValueCanBeSmi, - MemoryOrder memory_order = kRelaxedNonAtomic); - void StoreCompressedIntoObjectOffset( + void StoreIntoObjectOffset( Register object, int32_t offset, Register value, CanBeSmi can_value_be_smi = kValueCanBeSmi, - MemoryOrder memory_order = kRelaxedNonAtomic); - void StoreIntoObjectNoBarrier( - Register object, - const Address& dest, - Register value, MemoryOrder memory_order = kRelaxedNonAtomic) override; - void StoreCompressedIntoObjectNoBarrier( + void StoreIntoObjectNoBarrier( Register object, const Address& dest, Register value, @@ -1296,31 +1201,17 @@ class Assembler : public MicroAssembler { Register object, int32_t offset, Register value, - MemoryOrder memory_order = kRelaxedNonAtomic); - void StoreCompressedIntoObjectOffsetNoBarrier( - Register object, - int32_t offset, - Register value, - MemoryOrder memory_order = kRelaxedNonAtomic); - void StoreIntoObjectNoBarrier(Register object, - const Address& dest, - const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); - void StoreCompressedIntoObjectNoBarrier( + MemoryOrder memory_order = kRelaxedNonAtomic) override; + void StoreIntoObjectNoBarrier( Register object, const Address& dest, const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); + MemoryOrder memory_order = kRelaxedNonAtomic) override; void StoreIntoObjectOffsetNoBarrier( Register object, int32_t offset, const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); - void StoreCompressedIntoObjectOffsetNoBarrier( - Register object, - int32_t offset, - const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); + MemoryOrder memory_order = kRelaxedNonAtomic) override; // Stores a non-tagged value into a heap object. 
void StoreInternalPointer(Register object, @@ -1590,13 +1481,9 @@ class Assembler : public MicroAssembler { Register field, Register scratch); - void LoadCompressedFieldAddressForRegOffset(Register address, - Register instance, - Register offset_in_words_as_smi); - void LoadFieldAddressForRegOffset(Register address, Register instance, - Register offset_in_words_as_smi); + Register offset_in_words_as_smi) override; void LoadFieldAddressForOffset(Register address, Register instance, diff --git a/runtime/vm/compiler/assembler/assembler_x64.cc b/runtime/vm/compiler/assembler/assembler_x64.cc index 4594100d46ae..26c400c2822a 100644 --- a/runtime/vm/compiler/assembler/assembler_x64.cc +++ b/runtime/vm/compiler/assembler/assembler_x64.cc @@ -1545,28 +1545,12 @@ void Assembler::LoadQImmediate(FpuRegister dst, simd128_value_t immediate) { kHeapObjectTag)); } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::LoadCompressed(Register dest, const Address& slot) { -#if !defined(DART_COMPRESSED_POINTERS) - movq(dest, slot); -#else movl(dest, slot); // Zero-extension. addq(dest, Address(THR, target::Thread::heap_base_offset())); -#endif } - -void Assembler::LoadCompressedSmi(Register dest, const Address& slot) { -#if !defined(DART_COMPRESSED_POINTERS) - movq(dest, slot); -#else - movl(dest, slot); // Zero-extension. #endif -#if defined(DEBUG) - Label done; - BranchIfSmi(dest, &done, kNearJump); - Stop("Expected Smi"); - Bind(&done); -#endif -} void Assembler::StoreIntoObject(Register object, const Address& dest, @@ -1581,6 +1565,7 @@ void Assembler::StoreIntoObject(Register object, StoreBarrier(object, value, can_be_smi); } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoObject(Register object, const Address& dest, Register value, @@ -1593,6 +1578,7 @@ void Assembler::StoreCompressedIntoObject(Register object, } StoreBarrier(object, value, can_be_smi); } +#endif void Assembler::StoreBarrier(Register object, Register value, @@ -1652,6 +1638,7 @@ void Assembler::StoreIntoArray(Register object, StoreIntoArrayBarrier(object, slot, value, can_be_smi); } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoArray(Register object, Register slot, Register value, @@ -1659,6 +1646,7 @@ void Assembler::StoreCompressedIntoArray(Register object, OBJ(mov)(Address(slot, 0), value); StoreIntoArrayBarrier(object, slot, value, can_be_smi); } +#endif void Assembler::StoreIntoArrayBarrier(Register object, Register slot, @@ -1728,6 +1716,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object, // No store buffer update. } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoObjectNoBarrier(Register object, const Address& dest, Register value, @@ -1756,6 +1745,7 @@ void Assembler::StoreCompressedIntoObjectNoBarrier(Register object, #endif // defined(DEBUG) // No store buffer update. 
} +#endif void Assembler::StoreIntoObjectNoBarrier(Register object, const Address& dest, @@ -1769,6 +1759,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object, } } +#if defined(DART_COMPRESSED_POINTERS) void Assembler::StoreCompressedIntoObjectNoBarrier(Register object, const Address& dest, const Object& value, @@ -1776,6 +1767,7 @@ void Assembler::StoreCompressedIntoObjectNoBarrier(Register object, LoadObject(TMP, value); StoreCompressedIntoObjectNoBarrier(object, dest, TMP, memory_order); } +#endif void Assembler::StoreInternalPointer(Register object, const Address& dest, @@ -1830,9 +1822,7 @@ void Assembler::Bind(Label* label) { label->BindTo(bound); } -void Assembler::LoadFromOffset(Register reg, - const Address& address, - OperandSize sz) { +void Assembler::Load(Register reg, const Address& address, OperandSize sz) { switch (sz) { case kByte: return movsxb(reg, address); @@ -1854,9 +1844,7 @@ void Assembler::LoadFromOffset(Register reg, } } -void Assembler::StoreToOffset(Register reg, - const Address& address, - OperandSize sz) { +void Assembler::Store(Register reg, const Address& address, OperandSize sz) { switch (sz) { case kByte: case kUnsignedByte: diff --git a/runtime/vm/compiler/assembler/assembler_x64.h b/runtime/vm/compiler/assembler/assembler_x64.h index be444c98fce4..be3207f63569 100644 --- a/runtime/vm/compiler/assembler/assembler_x64.h +++ b/runtime/vm/compiler/assembler/assembler_x64.h @@ -856,9 +856,9 @@ class Assembler : public AssemblerBase { void PushObject(const Object& object); void CompareObject(Register reg, const Object& object); - void LoadCompressed(Register dest, const Address& slot); - void LoadCompressedSmi(Register dest, const Address& slot) override; - +#if defined(DART_COMPRESSED_POINTERS) + void LoadCompressed(Register dest, const Address& slot) override; +#endif // Store into a heap object and apply the generational and incremental write // barriers. All stores into heap objects must pass through this function or, // if the value can be proven either Smi or old-and-premarked, its NoBarrier @@ -869,93 +869,52 @@ class Assembler : public AssemblerBase { Register value, // Value we are storing. CanBeSmi can_be_smi = kValueCanBeSmi, MemoryOrder memory_order = kRelaxedNonAtomic) override; - void StoreIntoObjectOffset(Register object, // Object we are storing into. - int32_t offset, // Where we are storing into. - Register value, // Value we are storing. - CanBeSmi can_be_smi = kValueCanBeSmi, - MemoryOrder memory_order = kRelaxedNonAtomic) { - StoreIntoObject(object, FieldAddress(object, offset), value, can_be_smi, - memory_order); - } +#if defined(DART_COMPRESSED_POINTERS) void StoreCompressedIntoObject( Register object, // Object we are storing into. const Address& dest, // Where we are storing into. Register value, // Value we are storing. CanBeSmi can_be_smi = kValueCanBeSmi, MemoryOrder memory_order = kRelaxedNonAtomic) override; - void StoreCompressedIntoObjectOffset( - Register object, // Object we are storing into. - int32_t offset, // Where we are storing into. - Register value, // Value we are storing. - CanBeSmi can_be_smi = kValueCanBeSmi, - MemoryOrder memory_order = kRelaxedNonAtomic) { - StoreCompressedIntoObject(object, FieldAddress(object, offset), value, - can_be_smi, memory_order); - } +#endif void StoreBarrier(Register object, // Object we are storing into. Register value, // Value we are storing. CanBeSmi can_be_smi); void StoreIntoArray(Register object, // Object we are storing into. Register slot, // Where we are storing into. 
Register value, // Value we are storing. - CanBeSmi can_be_smi = kValueCanBeSmi); + CanBeSmi can_be_smi = kValueCanBeSmi) override; +#if defined(DART_COMPRESSED_POINTERS) void StoreCompressedIntoArray(Register object, // Object we are storing into. Register slot, // Where we are storing into. Register value, // Value we are storing. - CanBeSmi can_be_smi = kValueCanBeSmi); + CanBeSmi can_be_smi = kValueCanBeSmi) override; +#endif void StoreIntoObjectNoBarrier( Register object, const Address& dest, Register value, MemoryOrder memory_order = kRelaxedNonAtomic) override; +#if defined(DART_COMPRESSED_POINTERS) void StoreCompressedIntoObjectNoBarrier( Register object, const Address& dest, Register value, MemoryOrder memory_order = kRelaxedNonAtomic) override; - void StoreIntoObjectNoBarrier(Register object, - const Address& dest, - const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); - void StoreCompressedIntoObjectNoBarrier( +#endif + void StoreIntoObjectNoBarrier( Register object, const Address& dest, const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic); - - void StoreIntoObjectOffsetNoBarrier( - Register object, - int32_t offset, - Register value, - MemoryOrder memory_order = kRelaxedNonAtomic) { - StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value, - memory_order); - } - void StoreCompressedIntoObjectOffsetNoBarrier( - Register object, - int32_t offset, - Register value, - MemoryOrder memory_order = kRelaxedNonAtomic) { - StoreCompressedIntoObjectNoBarrier(object, FieldAddress(object, offset), - value, memory_order); - } - void StoreIntoObjectOffsetNoBarrier( - Register object, - int32_t offset, - const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic) { - StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value, - memory_order); - } - void StoreCompressedIntoObjectOffsetNoBarrier( + MemoryOrder memory_order = kRelaxedNonAtomic) override; +#if defined(DART_COMPRESSED_POINTERS) + void StoreCompressedIntoObjectNoBarrier( Register object, - int32_t offset, + const Address& dest, const Object& value, - MemoryOrder memory_order = kRelaxedNonAtomic) { - StoreCompressedIntoObjectNoBarrier(object, FieldAddress(object, offset), - value, memory_order); - } + MemoryOrder memory_order = kRelaxedNonAtomic) override; +#endif // Stores a non-tagged value into a heap object. void StoreInternalPointer(Register object, @@ -1109,7 +1068,7 @@ class Assembler : public AssemblerBase { Label* equals) override; void Align(int alignment, intptr_t offset); - void Bind(Label* label); + void Bind(Label* label) override; // Unconditional jump to a given label. void Jump(Label* label, JumpDistance distance = kFarJump) { jmp(label, distance); @@ -1122,65 +1081,29 @@ class Assembler : public AssemblerBase { void Jump(const Address& address) { jmp(address); } // Arch-specific LoadFromOffset to choose the right operation for [sz]. 
- void LoadFromOffset(Register dst, - const Address& address, - OperandSize sz = kEightBytes) override; - void LoadFromOffset(Register dst, - Register base, - int32_t offset, - OperandSize sz = kEightBytes) { - LoadFromOffset(dst, Address(base, offset), sz); - } - void LoadField(Register dst, const FieldAddress& address) override { - LoadField(dst, address, kEightBytes); - } - void LoadField(Register dst, const FieldAddress& address, OperandSize sz) { - LoadFromOffset(dst, address, sz); - } - void LoadCompressedField(Register dst, const FieldAddress& address) override { - LoadCompressed(dst, address); - } - void LoadFieldFromOffset(Register dst, - Register base, - int32_t offset, - OperandSize sz = kEightBytes) override { - LoadFromOffset(dst, FieldAddress(base, offset), sz); - } - void LoadCompressedFieldFromOffset(Register dst, - Register base, - int32_t offset) override { - LoadCompressed(dst, FieldAddress(base, offset)); - } + void Load(Register dst, + const Address& address, + OperandSize sz = kEightBytes) override; void LoadIndexedPayload(Register dst, Register base, int32_t payload_offset, Register index, ScaleFactor scale, - OperandSize sz = kEightBytes) { - LoadFromOffset(dst, FieldAddress(base, index, scale, payload_offset), sz); + OperandSize sz = kEightBytes) override { + Load(dst, FieldAddress(base, index, scale, payload_offset), sz); } +#if defined(DART_COMPRESSED_POINTERS) void LoadIndexedCompressed(Register dst, Register base, int32_t offset, - Register index) { + Register index) override { LoadCompressed( dst, FieldAddress(base, index, TIMES_COMPRESSED_WORD_SIZE, offset)); } - void StoreToOffset(Register src, - const Address& address, - OperandSize sz = kEightBytes) override; - void StoreToOffset(Register src, - Register base, - int32_t offset, - OperandSize sz = kEightBytes) { - StoreToOffset(src, Address(base, offset), sz); - } - void StoreFieldToOffset(Register src, - Register base, - int32_t offset, - OperandSize sz = kEightBytes) { - StoreToOffset(src, FieldAddress(base, offset), sz); - } +#endif + void Store(Register src, + const Address& address, + OperandSize sz = kEightBytes) override; void StoreZero(const Address& address, Register temp = kNoRegister) { movq(address, Immediate(0)); } @@ -1235,11 +1158,12 @@ class Assembler : public AssemblerBase { OperandSize size = kEightBytes) override { // On intel loads have load-acquire behavior (i.e. loads are not re-ordered // with other loads). - LoadFromOffset(dst, Address(address, offset), size); + Load(dst, Address(address, offset), size); #if defined(TARGET_USES_THREAD_SANITIZER) TsanLoadAcquire(Address(address, offset)); #endif } +#if defined(DART_COMPRESSED_POINTERS) void LoadAcquireCompressed(Register dst, Register address, int32_t offset = 0) override { @@ -1250,6 +1174,7 @@ class Assembler : public AssemblerBase { TsanLoadAcquire(Address(address, offset)); #endif } +#endif void StoreRelease(Register src, Register address, int32_t offset = 0) override { @@ -1260,9 +1185,10 @@ class Assembler : public AssemblerBase { TsanStoreRelease(Address(address, offset)); #endif } +#if defined(DART_COMPRESSED_POINTERS) void StoreReleaseCompressed(Register src, Register address, - int32_t offset = 0) { + int32_t offset = 0) override { // On intel stores have store-release behavior (i.e. stores are not // re-ordered with other stores). 
OBJ(mov)(Address(address, offset), src); @@ -1270,6 +1196,7 @@ class Assembler : public AssemblerBase { TsanStoreRelease(Address(address, offset)); #endif } +#endif void CompareWithMemoryValue(Register value, Address address, @@ -1281,11 +1208,6 @@ class Assembler : public AssemblerBase { cmpq(value, address); } } - void CompareWithCompressedFieldFromOffset(Register value, - Register base, - int32_t offset) { - OBJ(cmp)(value, FieldAddress(base, offset)); - } void RestoreCodePointer(); void LoadPoolPointer(Register pp = PP); @@ -1458,18 +1380,21 @@ class Assembler : public AssemblerBase { void LoadFieldAddressForRegOffset(Register address, Register instance, - Register offset_in_words_as_smi) { + Register offset_in_words_as_smi) override { static_assert(kSmiTagShift == 1, "adjust scale factor"); leaq(address, FieldAddress(instance, offset_in_words_as_smi, TIMES_4, 0)); } - void LoadCompressedFieldAddressForRegOffset(Register address, - Register instance, - Register offset_in_words_as_smi) { +#if defined(DART_COMPRESSED_POINTERS) + void LoadCompressedFieldAddressForRegOffset( + Register address, + Register instance, + Register offset_in_words_as_smi) override { static_assert(kSmiTagShift == 1, "adjust scale factor"); leaq(address, FieldAddress(instance, offset_in_words_as_smi, TIMES_COMPRESSED_HALF_WORD_SIZE, 0)); } +#endif void LoadFieldAddressForOffset(Register address, Register instance, diff --git a/runtime/vm/compiler/backend/flow_graph_compiler.cc b/runtime/vm/compiler/backend/flow_graph_compiler.cc index 6d859c1afaee..2ec1785056d2 100644 --- a/runtime/vm/compiler/backend/flow_graph_compiler.cc +++ b/runtime/vm/compiler/backend/flow_graph_compiler.cc @@ -3163,15 +3163,11 @@ void RangeErrorSlowPath::PushArgumentsForRuntimeCall( // The unboxed int64 argument is passed through a dedicated slot in Thread. // TODO(dartbug.com/33549): Clean this up when unboxed values // could be passed as arguments. 
+ __ StoreToOffset(locs->in(CheckBoundBaseInstr::kLengthPos).reg(), THR, + compiler::target::Thread::unboxed_runtime_arg_offset()); __ StoreToOffset( - locs->in(CheckBoundBaseInstr::kLengthPos).reg(), - compiler::Address( - THR, compiler::target::Thread::unboxed_runtime_arg_offset())); - __ StoreToOffset( - locs->in(CheckBoundBaseInstr::kIndexPos).reg(), - compiler::Address( - THR, compiler::target::Thread::unboxed_runtime_arg_offset() + - kInt64Size)); + locs->in(CheckBoundBaseInstr::kIndexPos).reg(), THR, + compiler::target::Thread::unboxed_runtime_arg_offset() + kInt64Size); } else { __ PushRegisterPair(locs->in(CheckBoundBaseInstr::kIndexPos).reg(), locs->in(CheckBoundBaseInstr::kLengthPos).reg()); diff --git a/runtime/vm/compiler/backend/il.cc b/runtime/vm/compiler/backend/il.cc index dc8ae3454047..e4c85df8de5c 100644 --- a/runtime/vm/compiler/backend/il.cc +++ b/runtime/vm/compiler/backend/il.cc @@ -7528,9 +7528,8 @@ void FfiCallInstr::EmitParamMoves(FlowGraphCompiler* compiler, const auto& target_stack = def_target.AsStack(); __ AddImmediate(temp0, origin.base_reg(), origin.stack_index() * compiler::target::kWordSize); - __ StoreToOffset(temp0, - compiler::Address(target_stack.base_register(), - target_stack.offset_in_bytes())); + __ StoreToOffset(temp0, target_stack.base_register(), + target_stack.offset_in_bytes()); } } else { __ Comment("def_target %s <- origin %s %s", diff --git a/runtime/vm/compiler/backend/il_arm.cc b/runtime/vm/compiler/backend/il_arm.cc index 87abc1f502c1..6b6d24ddf1e1 100644 --- a/runtime/vm/compiler/backend/il_arm.cc +++ b/runtime/vm/compiler/backend/il_arm.cc @@ -300,10 +300,8 @@ static void CopyUpToWordMultiple(FlowGraphCompiler* compiler, tested_bits |= (1 << tested_bit); __ tst(length_reg, compiler::Operand(1 << tested_bit)); auto const sz = OperandSizeFor(bytes); - __ LoadFromOffset(TMP, compiler::Address(src_reg, bytes, mode), sz, - NOT_ZERO); - __ StoreToOffset(TMP, compiler::Address(dest_reg, bytes, mode), sz, - NOT_ZERO); + __ Load(TMP, compiler::Address(src_reg, bytes, mode), sz, NOT_ZERO); + __ Store(TMP, compiler::Address(dest_reg, bytes, mode), sz, NOT_ZERO); } __ bics(length_reg, length_reg, compiler::Operand(tested_bits)); @@ -2322,8 +2320,7 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { } else { const Register result = locs()->out(0).reg(); if (aligned()) { - __ LoadFromOffset(result, element_address, - RepresentationUtils::OperandSize(rep)); + __ Load(result, element_address, RepresentationUtils::OperandSize(rep)); } else { switch (rep) { case kUnboxedUint32: @@ -2552,8 +2549,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { } else { const Register value = locs()->in(2).reg(); if (aligned()) { - __ StoreToOffset(value, element_address, - RepresentationUtils::OperandSize(rep)); + __ Store(value, element_address, RepresentationUtils::OperandSize(rep)); } else { switch (rep) { case kUnboxedUint32: diff --git a/runtime/vm/compiler/backend/il_arm64.cc b/runtime/vm/compiler/backend/il_arm64.cc index fb5d21afc237..f5e1298f8558 100644 --- a/runtime/vm/compiler/backend/il_arm64.cc +++ b/runtime/vm/compiler/backend/il_arm64.cc @@ -457,9 +457,8 @@ class ArgumentsMover : public ValueObject { // Flush all buffered registers. 
void Flush(FlowGraphCompiler* compiler) { if (pending_register_ != kNoRegister) { - __ StoreToOffset( - pending_register_, - compiler::Address(SP, pending_sp_relative_index_ * kWordSize)); + __ StoreToOffset(pending_register_, SP, + pending_sp_relative_index_ * kWordSize); pending_sp_relative_index_ = -1; pending_register_ = kNoRegister; } diff --git a/runtime/vm/compiler/backend/il_ia32.cc b/runtime/vm/compiler/backend/il_ia32.cc index e4bc484d9d56..41a6c6d29adf 100644 --- a/runtime/vm/compiler/backend/il_ia32.cc +++ b/runtime/vm/compiler/backend/il_ia32.cc @@ -338,10 +338,10 @@ void MoveArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) { Location value = locs()->in(0); const compiler::Address dst = LocationToStackSlotAddress(location()); if (value.IsConstant()) { - __ StoreToOffset(value.constant(), dst); + __ Store(value.constant(), dst); } else { ASSERT(value.IsRegister()); - __ StoreToOffset(value.reg(), dst); + __ Store(value.reg(), dst); } } @@ -1771,8 +1771,7 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { __ movl(result_hi, element_address); } else { Register result = locs()->out(0).reg(); - __ LoadFromOffset(result, element_address, - RepresentationUtils::OperandSize(rep)); + __ Load(result, element_address, RepresentationUtils::OperandSize(rep)); } } else if (RepresentationUtils::IsUnboxed(rep)) { XmmRegister result = locs()->out(0).fpu_reg(); @@ -1923,8 +1922,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { __ movl(element_address, value_hi); } else { Register value = locs()->in(2).reg(); - __ StoreToOffset(value, element_address, - RepresentationUtils::OperandSize(rep)); + __ Store(value, element_address, RepresentationUtils::OperandSize(rep)); } } else if (RepresentationUtils::IsUnboxed(rep)) { if (rep == kUnboxedFloat) { diff --git a/runtime/vm/compiler/backend/il_riscv.cc b/runtime/vm/compiler/backend/il_riscv.cc index ea25edee619d..80d93d33f4c3 100644 --- a/runtime/vm/compiler/backend/il_riscv.cc +++ b/runtime/vm/compiler/backend/il_riscv.cc @@ -249,19 +249,15 @@ static void CopyBytes(FlowGraphCompiler* compiler, auto const sz = OperandSizeFor(XLEN / 8); const intptr_t offset = (reversed ? -1 : 1) * (XLEN / 8); const intptr_t initial = reversed ? 
offset : 0; - __ LoadFromOffset(TMP, compiler::Address(src_reg, initial), sz); - __ LoadFromOffset(TMP2, compiler::Address(src_reg, initial + offset), sz); - __ StoreToOffset(TMP, compiler::Address(dest_reg, initial), sz); - __ StoreToOffset(TMP2, compiler::Address(dest_reg, initial + offset), sz); - __ LoadFromOffset(TMP, compiler::Address(src_reg, initial + 2 * offset), - sz); - __ LoadFromOffset(TMP2, compiler::Address(src_reg, initial + 3 * offset), - sz); + __ LoadFromOffset(TMP, src_reg, initial, sz); + __ LoadFromOffset(TMP2, src_reg, initial + offset, sz); + __ StoreToOffset(TMP, dest_reg, initial, sz); + __ StoreToOffset(TMP2, dest_reg, initial + offset, sz); + __ LoadFromOffset(TMP, src_reg, initial + 2 * offset, sz); + __ LoadFromOffset(TMP2, src_reg, initial + 3 * offset, sz); __ addi(src_reg, src_reg, 4 * offset); - __ StoreToOffset(TMP, compiler::Address(dest_reg, initial + 2 * offset), - sz); - __ StoreToOffset(TMP2, compiler::Address(dest_reg, initial + 3 * offset), - sz); + __ StoreToOffset(TMP, dest_reg, initial + 2 * offset, sz); + __ StoreToOffset(TMP2, dest_reg, initial + 3 * offset, sz); __ addi(dest_reg, dest_reg, 4 * offset); return; } @@ -272,11 +268,11 @@ static void CopyBytes(FlowGraphCompiler* compiler, auto const sz = OperandSizeFor(XLEN / 8); const intptr_t offset = (reversed ? -1 : 1) * (XLEN / 8); const intptr_t initial = reversed ? offset : 0; - __ LoadFromOffset(TMP, compiler::Address(src_reg, initial), sz); - __ LoadFromOffset(TMP2, compiler::Address(src_reg, initial + offset), sz); + __ LoadFromOffset(TMP, src_reg, initial, sz); + __ LoadFromOffset(TMP2, src_reg, initial + offset, sz); __ addi(src_reg, src_reg, 2 * offset); - __ StoreToOffset(TMP, compiler::Address(dest_reg, initial), sz); - __ StoreToOffset(TMP2, compiler::Address(dest_reg, initial + offset), sz); + __ StoreToOffset(TMP, dest_reg, initial, sz); + __ StoreToOffset(TMP2, dest_reg, initial + offset, sz); __ addi(dest_reg, dest_reg, 2 * offset); return; } @@ -286,9 +282,9 @@ static void CopyBytes(FlowGraphCompiler* compiler, auto const sz = OperandSizeFor(count); const intptr_t offset = (reversed ? -1 : 1) * count; const intptr_t initial = reversed ? 
offset : 0; - __ LoadFromOffset(TMP, compiler::Address(src_reg, initial), sz); + __ LoadFromOffset(TMP, src_reg, initial, sz); __ addi(src_reg, src_reg, offset); - __ StoreToOffset(TMP, compiler::Address(dest_reg, initial), sz); + __ StoreToOffset(TMP, dest_reg, initial, sz); __ addi(dest_reg, dest_reg, offset); } @@ -1758,10 +1754,8 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { ASSERT(temp1 != CallingConventions::kReturnReg); ASSERT(temp2 != CallingConventions::kReturnReg); compiler::Label not_error; - __ LoadFromOffset( - temp1, - compiler::Address(CallingConventions::kReturnReg, - compiler::target::LocalHandle::ptr_offset())); + __ LoadFromOffset(temp1, CallingConventions::kReturnReg, + compiler::target::LocalHandle::ptr_offset()); __ BranchIfSmi(temp1, ¬_error); __ LoadClassId(temp1, temp1); __ RangeCheck(temp1, temp2, kFirstErrorCid, kLastErrorCid, @@ -2189,13 +2183,11 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { element_address.offset() + 4)); } else { const Register result = locs()->out(0).reg(); - __ LoadFromOffset(result, element_address, - RepresentationUtils::OperandSize(rep)); + __ Load(result, element_address, RepresentationUtils::OperandSize(rep)); } #else const Register result = locs()->out(0).reg(); - __ LoadFromOffset(result, element_address, - RepresentationUtils::OperandSize(rep)); + __ Load(result, element_address, RepresentationUtils::OperandSize(rep)); #endif } else if (RepresentationUtils::IsUnboxed(rep)) { const FRegister result = locs()->out(0).fpu_reg(); @@ -2502,11 +2494,10 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { } else { if (locs()->in(2).IsConstant()) { ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation()); - __ StoreToOffset(ZR, element_address, - RepresentationUtils::OperandSize(rep)); + __ Store(ZR, element_address, RepresentationUtils::OperandSize(rep)); } else { - __ StoreToOffset(locs()->in(2).reg(), element_address, - RepresentationUtils::OperandSize(rep)); + __ Store(locs()->in(2).reg(), element_address, + RepresentationUtils::OperandSize(rep)); } } } else if (RepresentationUtils::IsUnboxed(rep)) { diff --git a/runtime/vm/compiler/backend/il_x64.cc b/runtime/vm/compiler/backend/il_x64.cc index e30359581c80..2a5b57cef08b 100644 --- a/runtime/vm/compiler/backend/il_x64.cc +++ b/runtime/vm/compiler/backend/il_x64.cc @@ -1996,8 +1996,7 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { ASSERT(representation() == Boxing::NativeRepresentation(rep)); if (RepresentationUtils::IsUnboxedInteger(rep)) { Register result = locs()->out(0).reg(); - __ LoadFromOffset(result, element_address, - RepresentationUtils::OperandSize(rep)); + __ Load(result, element_address, RepresentationUtils::OperandSize(rep)); } else if (RepresentationUtils::IsUnboxed(rep)) { XmmRegister result = locs()->out(0).fpu_reg(); if (rep == kUnboxedFloat) { @@ -2216,8 +2215,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { } } else { Register value = locs()->in(2).reg(); - __ StoreToOffset(value, element_address, - RepresentationUtils::OperandSize(rep)); + __ Store(value, element_address, RepresentationUtils::OperandSize(rep)); } } else if (RepresentationUtils::IsUnboxed(rep)) { if (rep == kUnboxedFloat) { diff --git a/runtime/vm/compiler/stub_code_compiler.cc b/runtime/vm/compiler/stub_code_compiler.cc index a02d280316a7..d2e9ffeb0601 100644 --- a/runtime/vm/compiler/stub_code_compiler.cc +++ b/runtime/vm/compiler/stub_code_compiler.cc @@ -1383,7 +1383,7 
@@ void StubCodeCompiler::GenerateAllocateRecordStub() { __ AndImmediate(temp_reg, -target::ObjectAlignment::kObjectAlignment); // Now allocate the object. - __ LoadFromOffset(result_reg, Address(THR, target::Thread::top_offset())); + __ LoadFromOffset(result_reg, THR, target::Thread::top_offset()); __ MoveRegister(new_top_reg, temp_reg); __ AddRegisters(new_top_reg, result_reg); // Check if the allocation fits into the remaining space. @@ -1394,7 +1394,7 @@ void StubCodeCompiler::GenerateAllocateRecordStub() { // Successfully allocated the object, now update top to point to // next object start and initialize the object. - __ StoreToOffset(new_top_reg, Address(THR, target::Thread::top_offset())); + __ StoreToOffset(new_top_reg, THR, target::Thread::top_offset()); __ AddImmediate(result_reg, kHeapObjectTag); // Calculate the size tag. @@ -1414,9 +1414,8 @@ void StubCodeCompiler::GenerateAllocateRecordStub() { __ Bind(&done); uword tags = target::MakeTagWordForNewSpaceObject(kRecordCid, 0); __ OrImmediate(temp_reg, tags); - __ StoreToOffset( - temp_reg, - FieldAddress(result_reg, target::Object::tags_offset())); // Tags. + __ StoreFieldToOffset(temp_reg, result_reg, + target::Object::tags_offset()); // Tags. } __ StoreCompressedIntoObjectNoBarrier( @@ -1807,11 +1806,10 @@ static void CallDartCoreLibraryFunction( __ Call(Address(THR, entry_point_offset_in_thread)); } else { __ LoadIsolateGroup(FUNCTION_REG); - __ LoadFromOffset( - FUNCTION_REG, - Address(FUNCTION_REG, target::IsolateGroup::object_store_offset())); - __ LoadFromOffset(FUNCTION_REG, - Address(FUNCTION_REG, function_offset_in_object_store)); + __ LoadFromOffset(FUNCTION_REG, FUNCTION_REG, + target::IsolateGroup::object_store_offset()); + __ LoadFromOffset(FUNCTION_REG, FUNCTION_REG, + function_offset_in_object_store); __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG, target::Function::code_offset()); if (!uses_args_desc) { @@ -1855,7 +1853,7 @@ static void GenerateAllocateSuspendState(Assembler* assembler, __ AndImmediate(temp_reg, -target::ObjectAlignment::kObjectAlignment); // Now allocate the object. - __ LoadFromOffset(result_reg, Address(THR, target::Thread::top_offset())); + __ LoadFromOffset(result_reg, THR, target::Thread::top_offset()); __ AddRegisters(temp_reg, result_reg); // Check if the allocation fits into the remaining space. __ CompareWithMemoryValue(temp_reg, @@ -1865,7 +1863,7 @@ static void GenerateAllocateSuspendState(Assembler* assembler, // Successfully allocated the object, now update top to point to // next object start and initialize the object. - __ StoreToOffset(temp_reg, Address(THR, target::Thread::top_offset())); + __ StoreToOffset(temp_reg, THR, target::Thread::top_offset()); __ SubRegisters(temp_reg, result_reg); __ AddImmediate(result_reg, kHeapObjectTag); @@ -1873,9 +1871,8 @@ static void GenerateAllocateSuspendState(Assembler* assembler, // Use rounded object size to calculate and save frame capacity. __ AddImmediate(temp_reg, temp_reg, -target::SuspendState::payload_offset()); - __ StoreToOffset( - temp_reg, FieldAddress(result_reg, - target::SuspendState::frame_capacity_offset())); + __ StoreFieldToOffset(temp_reg, result_reg, + target::SuspendState::frame_capacity_offset()); // Restore rounded object size. 
__ AddImmediate(temp_reg, temp_reg, target::SuspendState::payload_offset()); } @@ -1897,14 +1894,12 @@ static void GenerateAllocateSuspendState(Assembler* assembler, __ Bind(&done); uword tags = target::MakeTagWordForNewSpaceObject(kSuspendStateCid, 0); __ OrImmediate(temp_reg, tags); - __ StoreToOffset( - temp_reg, - FieldAddress(result_reg, target::Object::tags_offset())); // Tags. + __ StoreFieldToOffset(temp_reg, result_reg, + target::Object::tags_offset()); // Tags. } - __ StoreToOffset( - frame_size_reg, - FieldAddress(result_reg, target::SuspendState::frame_size_offset())); + __ StoreFieldToOffset(frame_size_reg, result_reg, + target::SuspendState::frame_size_offset()); } void StubCodeCompiler::GenerateSuspendStub( @@ -1927,7 +1922,7 @@ void StubCodeCompiler::GenerateSuspendStub( SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame. #endif - __ LoadFromOffset(kSuspendState, Address(FPREG, SuspendStateFpOffset())); + __ LoadFromOffset(kSuspendState, FPREG, SuspendStateFpOffset()); __ AddImmediate( kFrameSize, FPREG, @@ -1954,9 +1949,8 @@ void StubCodeCompiler::GenerateSuspendStub( target::SuspendState::frame_capacity_offset())); __ BranchIf(UNSIGNED_GREATER, &resize_suspend_state); - __ StoreToOffset( - kFrameSize, - FieldAddress(kSuspendState, target::SuspendState::frame_size_offset())); + __ StoreFieldToOffset(kFrameSize, kSuspendState, + target::SuspendState::frame_size_offset()); __ Jump(&init_done); __ Bind(&alloc_suspend_state); @@ -1996,9 +1990,8 @@ void StubCodeCompiler::GenerateSuspendStub( __ Bind(&alloc_done); __ Comment("Save SuspendState to frame"); - __ LoadFromOffset( - kTemp, Address(FPREG, kSavedCallerFpSlotFromFp * target::kWordSize)); - __ StoreToOffset(kSuspendState, Address(kTemp, SuspendStateFpOffset())); + __ LoadFromOffset(kTemp, FPREG, kSavedCallerFpSlotFromFp * target::kWordSize); + __ StoreToOffset(kSuspendState, kTemp, SuspendStateFpOffset()); __ Bind(&init_done); __ Comment("Copy frame to SuspendState"); @@ -2007,9 +2000,8 @@ void StubCodeCompiler::GenerateSuspendStub( { // Verify that SuspendState.frame_size == kFrameSize. Label okay; - __ LoadFromOffset( - kTemp, - FieldAddress(kSuspendState, target::SuspendState::frame_size_offset())); + __ LoadFieldFromOffset(kTemp, kSuspendState, + target::SuspendState::frame_size_offset()); __ CompareRegisters(kTemp, kFrameSize); __ BranchIf(EQUAL, &okay); __ Breakpoint(); @@ -2028,23 +2020,21 @@ void StubCodeCompiler::GenerateSuspendStub( __ PopRegister(THR); } - __ LoadFromOffset( - kTemp, Address(FPREG, kSavedCallerPcSlotFromFp * target::kWordSize)); - __ StoreToOffset( - kTemp, FieldAddress(kSuspendState, target::SuspendState::pc_offset())); + __ LoadFromOffset(kTemp, FPREG, kSavedCallerPcSlotFromFp * target::kWordSize); + __ StoreFieldToOffset(kTemp, kSuspendState, + target::SuspendState::pc_offset()); #ifdef DEBUG { // Verify that kSuspendState matches :suspend_state in the copied stack // frame. 
     Label okay;
-    __ LoadFromOffset(
-        kTemp,
-        FieldAddress(kSuspendState, target::SuspendState::frame_size_offset()));
+    __ LoadFieldFromOffset(kTemp, kSuspendState,
+                           target::SuspendState::frame_size_offset());
     __ AddRegisters(kTemp, kSuspendState);
-    __ LoadFromOffset(
-        kTemp, FieldAddress(kTemp, target::SuspendState::payload_offset() +
-                                       SuspendStateFpOffset()));
+    __ LoadFieldFromOffset(
+        kTemp, kTemp,
+        target::SuspendState::payload_offset() + SuspendStateFpOffset());
     __ CompareRegisters(kTemp, kSuspendState);
     __ BranchIf(EQUAL, &okay);
     __ Breakpoint();
@@ -2060,7 +2050,7 @@ void StubCodeCompiler::GenerateSuspendStub(
   // Write barrier.
   __ AndImmediate(kTemp, kSuspendState, target::kPageMask);
-  __ LoadFromOffset(kTemp, Address(kTemp, target::Page::original_top_offset()));
+  __ LoadFromOffset(kTemp, kTemp, target::Page::original_top_offset());
   __ CompareRegisters(kSuspendState, kTemp);
   __ BranchIf(UNSIGNED_LESS, &remember_object);
   // Assumption: SuspendStates are always on non-image pages.
@@ -2097,8 +2087,8 @@ void StubCodeCompiler::GenerateSuspendStub(
   // will only unwind frame and return.
   if (!FLAG_precompiled_mode) {
     __ LoadFromOffset(
-        PP, Address(FPREG, target::frame_layout.saved_caller_pp_from_fp *
-                               target::kWordSize));
+        PP, FPREG,
+        target::frame_layout.saved_caller_pp_from_fp * target::kWordSize);
   }
 #endif
   __ Ret();
@@ -2225,8 +2215,8 @@ void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
   __ LeaveStubFrame();
   // Set :suspend_state in the caller frame.
-  __ StoreToOffset(CallingConventions::kReturnReg,
-                   Address(FPREG, SuspendStateFpOffset()));
+  __ StoreToOffset(CallingConventions::kReturnReg, FPREG,
+                   SuspendStateFpOffset());
   __ Ret();
 }
@@ -2266,8 +2256,8 @@ void StubCodeCompiler::GenerateResumeStub() {
   const intptr_t param_offset =
       target::frame_layout.param_end_from_fp * target::kWordSize;
-  __ LoadFromOffset(kSuspendState,
-                    Address(FPREG, param_offset + 4 * target::kWordSize));
+  __ LoadFromOffset(kSuspendState, FPREG, param_offset + 4 * target::kWordSize);
 #ifdef DEBUG
   {
     Label okay;
@@ -2278,8 +2267,8 @@ void StubCodeCompiler::GenerateResumeStub() {
   }
   {
     Label okay;
-    __ LoadFromOffset(
-        kTemp, FieldAddress(kSuspendState, target::SuspendState::pc_offset()));
+    __ LoadFieldFromOffset(kTemp, kSuspendState,
+                           target::SuspendState::pc_offset());
     __ CompareImmediate(kTemp, 0);
     __ BranchIf(NOT_EQUAL, &okay);
     __ Breakpoint();
@@ -2287,17 +2276,16 @@ void StubCodeCompiler::GenerateResumeStub() {
   }
 #endif
-  __ LoadFromOffset(
-      kFrameSize,
-      FieldAddress(kSuspendState, target::SuspendState::frame_size_offset()));
+  __ LoadFieldFromOffset(kFrameSize, kSuspendState,
+                         target::SuspendState::frame_size_offset());
 #ifdef DEBUG
   {
     Label okay;
     __ MoveRegister(kTemp, kFrameSize);
     __ AddRegisters(kTemp, kSuspendState);
-    __ LoadFromOffset(
-        kTemp, FieldAddress(kTemp, target::SuspendState::payload_offset() +
-                                       SuspendStateFpOffset()));
+    __ LoadFieldFromOffset(
+        kTemp, kTemp,
+        target::SuspendState::payload_offset() + SuspendStateFpOffset());
     __ CompareRegisters(kTemp, kSuspendState);
     __ BranchIf(EQUAL, &okay);
     __ Breakpoint();
@@ -2310,13 +2298,11 @@ void StubCodeCompiler::GenerateResumeStub() {
   __ MoveRegister(kTemp, kSuspendState);
   __ AddRegisters(kTemp, kFrameSize);
   __ LoadFromOffset(
-      CODE_REG,
-      Address(kTemp,
-              target::SuspendState::payload_offset() - kHeapObjectTag +
-                  target::frame_layout.code_from_fp * target::kWordSize));
-  __ StoreToOffset(
-      CODE_REG,
-      Address(FPREG, target::frame_layout.code_from_fp * target::kWordSize));
+  __ LoadFromOffset(
+      CODE_REG, kTemp,
+      target::SuspendState::payload_offset() - kHeapObjectTag +
+          target::frame_layout.code_from_fp * target::kWordSize);
+  __ StoreToOffset(CODE_REG, FPREG,
+                   target::frame_layout.code_from_fp * target::kWordSize);
 #if !defined(TARGET_ARCH_IA32)
   __ LoadPoolPointer(PP);
 #endif
@@ -2349,8 +2335,8 @@ void StubCodeCompiler::GenerateResumeStub() {
   __ Comment("Transfer control");
-  __ LoadFromOffset(kResumePc, FieldAddress(kSuspendState,
-                                            target::SuspendState::pc_offset()));
+  __ LoadFieldFromOffset(kResumePc, kSuspendState,
+                         target::SuspendState::pc_offset());
   __ StoreZero(FieldAddress(kSuspendState, target::SuspendState::pc_offset()),
                kTemp);
@@ -2363,15 +2349,14 @@ void StubCodeCompiler::GenerateResumeStub() {
   static_assert((kException != CODE_REG) && (kException != PP),
                 "should not interfere");
-  __ LoadFromOffset(kException,
-                    Address(FPREG, param_offset + 2 * target::kWordSize));
+  __ LoadFromOffset(kException, FPREG, param_offset + 2 * target::kWordSize);
   __ CompareObject(kException, NullObject());
   __ BranchIf(NOT_EQUAL, &call_runtime);
   if (!FLAG_precompiled_mode) {
     // Check if Code is disabled.
-    __ LoadFromOffset(
-        kTemp, FieldAddress(CODE_REG, target::Code::instructions_offset()));
+    __ LoadFieldFromOffset(kTemp, CODE_REG,
+                           target::Code::instructions_offset());
     __ CompareWithMemoryValue(
         kTemp,
         FieldAddress(CODE_REG, target::Code::active_instructions_offset()));
@@ -2380,25 +2365,23 @@ void StubCodeCompiler::GenerateResumeStub() {
 #if !defined(PRODUCT)
     // Check if there is a breakpoint at resumption.
     __ LoadIsolate(kTemp);
-    __ LoadFromOffset(
-        kTemp,
-        Address(kTemp, target::Isolate::has_resumption_breakpoints_offset()),
-        kUnsignedByte);
+    __ LoadFromOffset(kTemp, kTemp,
+                      target::Isolate::has_resumption_breakpoints_offset(),
+                      kUnsignedByte);
     __ CompareImmediate(kTemp, 0);
     __ BranchIf(NOT_EQUAL, &call_runtime);
 #endif
   }
-  __ LoadFromOffset(CallingConventions::kReturnReg,
-                    Address(FPREG, param_offset + 3 * target::kWordSize));
+  __ LoadFromOffset(CallingConventions::kReturnReg, FPREG,
+                    param_offset + 3 * target::kWordSize);
   __ Jump(kResumePc);
   __ Comment("Call runtime to throw exception or deopt");
   __ Bind(&call_runtime);
-  __ LoadFromOffset(kStackTrace,
-                    Address(FPREG, param_offset + 1 * target::kWordSize));
+  __ LoadFromOffset(kStackTrace, FPREG, param_offset + 1 * target::kWordSize);
   static_assert((kStackTrace != CODE_REG) && (kStackTrace != PP),
                 "should not interfere");
@@ -2421,8 +2404,8 @@ void StubCodeCompiler::GenerateResumeStub() {
     __ Breakpoint();
   } else {
     __ LeaveStubFrame();
-    __ LoadFromOffset(CallingConventions::kReturnReg,
-                      Address(FPREG, param_offset + 3 * target::kWordSize));
+    __ LoadFromOffset(CallingConventions::kReturnReg, FPREG,
+                      param_offset + 3 * target::kWordSize);
     // Lazy deoptimize.
     __ Ret();
   }
@@ -2438,7 +2421,7 @@ void StubCodeCompiler::GenerateReturnStub(
   SPILLS_LR_TO_FRAME({});  // Simulate entering the caller (Dart) frame.
 #endif
-  __ LoadFromOffset(kSuspendState, Address(FPREG, SuspendStateFpOffset()));
+  __ LoadFromOffset(kSuspendState, FPREG, SuspendStateFpOffset());
 #ifdef DEBUG
   {
     Label okay;
@@ -2492,7 +2475,7 @@ void StubCodeCompiler::GenerateAsyncExceptionHandlerStub() {
   SPILLS_LR_TO_FRAME({});  // Simulate entering the caller (Dart) frame.
 #endif
-  __ LoadFromOffset(kSuspendState, Address(FPREG, SuspendStateFpOffset()));
+  __ LoadFromOffset(kSuspendState, FPREG, SuspendStateFpOffset());
   // Check if suspend_state is initialized. Otherwise
   // exception was thrown from the prologue code and
@@ -2546,8 +2529,7 @@ void StubCodeCompiler::GenerateCloneSuspendStateStub() {
   {
     // Can only clone _SuspendState objects with copied frames.
     Label okay;
-    __ LoadFromOffset(kTemp,
-                      FieldAddress(kSource, target::SuspendState::pc_offset()));
+    __ LoadFieldFromOffset(kTemp, kSource, target::SuspendState::pc_offset());
     __ CompareImmediate(kTemp, 0);
     __ BranchIf(NOT_EQUAL, &okay);
     __ Breakpoint();
@@ -2555,18 +2537,15 @@ void StubCodeCompiler::GenerateCloneSuspendStateStub() {
   }
 #endif
-  __ LoadFromOffset(
-      kFrameSize,
-      FieldAddress(kSource, target::SuspendState::frame_size_offset()));
+  __ LoadFieldFromOffset(kFrameSize, kSource,
+                         target::SuspendState::frame_size_offset());
   GenerateAllocateSuspendState(assembler, &alloc_slow_case, kDestination,
                                kFrameSize, kTemp);
   // Copy pc.
-  __ LoadFromOffset(kTemp,
-                    FieldAddress(kSource, target::SuspendState::pc_offset()));
-  __ StoreToOffset(
-      kTemp, FieldAddress(kDestination, target::SuspendState::pc_offset()));
+  __ LoadFieldFromOffset(kTemp, kSource, target::SuspendState::pc_offset());
+  __ StoreFieldToOffset(kTemp, kDestination, target::SuspendState::pc_offset());
   // Copy function_data.
   __ LoadCompressedFieldFromOffset(
@@ -2606,13 +2585,12 @@ void StubCodeCompiler::GenerateCloneSuspendStateStub() {
   // Update value of :suspend_state variable in the copied frame
   // for the new SuspendState.
-  __ LoadFromOffset(
-      kTemp,
-      FieldAddress(kDestination, target::SuspendState::frame_size_offset()));
+  __ LoadFieldFromOffset(kTemp, kDestination,
+                         target::SuspendState::frame_size_offset());
   __ AddRegisters(kTemp, kDestination);
-  __ StoreToOffset(kDestination,
-                   FieldAddress(kTemp, target::SuspendState::payload_offset() +
-                                           SuspendStateFpOffset()));
+  __ StoreFieldToOffset(
+      kDestination, kTemp,
+      target::SuspendState::payload_offset() + SuspendStateFpOffset());
   __ MoveRegister(CallingConventions::kReturnReg, kDestination);
   EnsureIsNewOrRemembered();
diff --git a/runtime/vm/compiler/stub_code_compiler_arm.cc b/runtime/vm/compiler/stub_code_compiler_arm.cc
index a840d83f5b9d..929b37eae19a 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm.cc
@@ -42,7 +42,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered() {
   // Page's TLAB use is always ascending.
   Label done;
   __ AndImmediate(TMP, R0, target::kPageMask);
-  __ LoadFromOffset(TMP, Address(TMP, target::Page::original_top_offset()));
+  __ LoadFromOffset(TMP, TMP, target::Page::original_top_offset());
   __ CompareRegisters(R0, TMP);
   __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
@@ -322,9 +322,8 @@ void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
   __ AndImmediate(dst, dst, FfiCallbackMetadata::kPageMask);
   // Load the function from the function table.
-  __ LoadFromOffset(
-      dst,
-      Address(dst, FfiCallbackMetadata::RuntimeFunctionOffset(function_index)));
+  __ LoadFromOffset(dst, dst,
+                    FfiCallbackMetadata::RuntimeFunctionOffset(function_index));
 }
 void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
diff --git a/runtime/vm/compiler/stub_code_compiler_arm64.cc b/runtime/vm/compiler/stub_code_compiler_arm64.cc
index 586e2505b4f0..fbcd27b12e2c 100644
--- a/runtime/vm/compiler/stub_code_compiler_arm64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_arm64.cc
@@ -41,7 +41,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered() {
   // Page's TLAB use is always ascending.
   Label done;
   __ AndImmediate(TMP, R0, target::kPageMask);
-  __ LoadFromOffset(TMP, Address(TMP, target::Page::original_top_offset()));
+  __ LoadFromOffset(TMP, TMP, target::Page::original_top_offset());
   __ CompareRegisters(R0, TMP);
   __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
@@ -465,9 +465,8 @@ void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
   __ andi(dst, dst, Immediate(FfiCallbackMetadata::kPageMask));
   // Load the function from the function table.
-  __ LoadFromOffset(
-      dst,
-      Address(dst, FfiCallbackMetadata::RuntimeFunctionOffset(function_index)));
+  __ LoadFromOffset(dst, dst,
+                    FfiCallbackMetadata::RuntimeFunctionOffset(function_index));
 }
 void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
diff --git a/runtime/vm/compiler/stub_code_compiler_ia32.cc b/runtime/vm/compiler/stub_code_compiler_ia32.cc
index 73ecde58f643..27b8bd4cc424 100644
--- a/runtime/vm/compiler/stub_code_compiler_ia32.cc
+++ b/runtime/vm/compiler/stub_code_compiler_ia32.cc
@@ -40,7 +40,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered() {
   // Page's TLAB use is always ascending.
   Label done;
   __ AndImmediate(ECX, EAX, target::kPageMask);
-  __ LoadFromOffset(ECX, Address(ECX, target::Page::original_top_offset()));
+  __ LoadFromOffset(ECX, ECX, target::Page::original_top_offset());
   __ CompareRegisters(EAX, ECX);
   __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
diff --git a/runtime/vm/compiler/stub_code_compiler_riscv.cc b/runtime/vm/compiler/stub_code_compiler_riscv.cc
index 266a3435f5dd..f8505e87cb44 100644
--- a/runtime/vm/compiler/stub_code_compiler_riscv.cc
+++ b/runtime/vm/compiler/stub_code_compiler_riscv.cc
@@ -41,7 +41,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered() {
   // Page's TLAB use is always ascending.
   Label done;
   __ AndImmediate(TMP, A0, target::kPageMask);
-  __ LoadFromOffset(TMP, Address(TMP, target::Page::original_top_offset()));
+  __ LoadFromOffset(TMP, TMP, target::Page::original_top_offset());
   __ CompareRegisters(A0, TMP);
   __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
@@ -333,9 +333,8 @@ void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
   __ AndImmediate(dst, FfiCallbackMetadata::kPageMask);
   // Load the function from the function table.
-  __ LoadFromOffset(
-      dst,
-      Address(dst, FfiCallbackMetadata::RuntimeFunctionOffset(function_index)));
+  __ LoadFromOffset(dst, dst,
+                    FfiCallbackMetadata::RuntimeFunctionOffset(function_index));
 }
 void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
diff --git a/runtime/vm/compiler/stub_code_compiler_x64.cc b/runtime/vm/compiler/stub_code_compiler_x64.cc
index 9c685cdba7ef..313c7aa92590 100644
--- a/runtime/vm/compiler/stub_code_compiler_x64.cc
+++ b/runtime/vm/compiler/stub_code_compiler_x64.cc
@@ -44,7 +44,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered() {
   // Page's TLAB use is always ascending.
   Label done;
   __ AndImmediate(TMP, RAX, target::kPageMask);
-  __ LoadFromOffset(TMP, Address(TMP, target::Page::original_top_offset()));
+  __ LoadFromOffset(TMP, TMP, target::Page::original_top_offset());
   __ CompareRegisters(RAX, TMP);
   __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
@@ -429,9 +429,8 @@ void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
   __ andq(dst, Immediate(FfiCallbackMetadata::kPageMask));
   // Load the function from the function table.
-  __ LoadFromOffset(
-      dst,
-      Address(dst, FfiCallbackMetadata::RuntimeFunctionOffset(function_index)));
+  __ LoadFromOffset(dst, dst,
+                    FfiCallbackMetadata::RuntimeFunctionOffset(function_index));
 }
 static const RegisterSet kArgumentRegisterSet(