Commit ae922188 authored by Santiago Aboy Solanes, committed by V8 LUCI CQ

[compiler] Add TSAN support for generated code movq and movl

We must not emit any instructions between EmitOOLTrapIfNeeded and the
movs. For this reason, we now emit EmitTSANStoreOOLIfNeeded after the
store rather than before it.

We are also now requiring the code_kind to know if we are compiling a
FOR_TESTING function.

Finally, we have to differentiate between two different wasm-to-js
functions: one lives in the wasm code space, and another one lives on
the heap. The one that lives in wasm code space calls wasm stub calls,
and the other one calls the builtin like JS does.

Bug: v8:7790, v8:11600
Change-Id: Iafb4643068ae4e31881662e032f73af98a66baca
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2945185
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75077}
parent 2b9cd1c9
...@@ -498,16 +498,30 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value, ...@@ -498,16 +498,30 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
// value_parameter <= value // value_parameter <= value
MovePair(address_parameter, address, value_parameter, value); MovePair(address_parameter, address, value_parameter, value);
if (mode == StubCallMode::kCallWasmRuntimeStub) { if (isolate()) {
// Use {near_call} for direct Wasm call within a module.
auto wasm_target = wasm::WasmCode::GetTSANRelaxedStoreStub(fp_mode, size);
near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
} else {
auto builtin_index = Builtins::GetTSANRelaxedStoreStub(fp_mode, size); auto builtin_index = Builtins::GetTSANRelaxedStoreStub(fp_mode, size);
Handle<Code> code_target = Handle<Code> code_target =
isolate()->builtins()->builtin_handle(builtin_index); isolate()->builtins()->builtin_handle(builtin_index);
Call(code_target, RelocInfo::CODE_TARGET); Call(code_target, RelocInfo::CODE_TARGET);
} }
#if V8_ENABLE_WEBASSEMBLY
// There are two different kinds of wasm-to-js functions: one lives in the
// wasm code space, and another one lives on the heap. Both of them have the
// same CodeKind (WASM_TO_JS_FUNCTION), but depending on where they are they
// have to either use the wasm stub calls, or call the builtin using the
// isolate like JS does. In order to know which wasm-to-js function we are
// compiling right now, we check if the isolate is null.
// TODO(solanes, v8:11600): Split CodeKind::WASM_TO_JS_FUNCTION into two
// different CodeKinds and pass the CodeKind as a parameter so that we can use
// that instead of a nullptr check.
// NOLINTNEXTLINE(readability/braces)
else {
DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
// Use {near_call} for direct Wasm call within a module.
auto wasm_target = wasm::WasmCode::GetTSANRelaxedStoreStub(fp_mode, size);
near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
}
#endif // V8_ENABLE_WEBASSEMBLY
MaybeRestoreRegisters(registers); MaybeRestoreRegisters(registers);
} }
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include "src/base/optional.h" #include "src/base/optional.h"
#include "src/codegen/macro-assembler.h" #include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/safepoint-table.h" #include "src/codegen/safepoint-table.h"
#include "src/codegen/source-position-table.h" #include "src/codegen/source-position-table.h"
#include "src/compiler/backend/gap-resolver.h" #include "src/compiler/backend/gap-resolver.h"
...@@ -16,13 +17,12 @@ ...@@ -16,13 +17,12 @@
#include "src/compiler/backend/unwinding-info-writer.h" #include "src/compiler/backend/unwinding-info-writer.h"
#include "src/compiler/osr.h" #include "src/compiler/osr.h"
#include "src/deoptimizer/deoptimizer.h" #include "src/deoptimizer/deoptimizer.h"
#include "src/objects/code-kind.h"
#include "src/trap-handler/trap-handler.h" #include "src/trap-handler/trap-handler.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
class OptimizedCompilationInfo;
namespace compiler { namespace compiler {
// Forward declarations. // Forward declarations.
...@@ -188,6 +188,8 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { ...@@ -188,6 +188,8 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
bool ShouldApplyOffsetToStackCheck(Instruction* instr, uint32_t* offset); bool ShouldApplyOffsetToStackCheck(Instruction* instr, uint32_t* offset);
uint32_t GetStackCheckOffset(); uint32_t GetStackCheckOffset();
CodeKind code_kind() const { return info_->code_kind(); }
private: private:
GapResolver* resolver() { return &resolver_; } GapResolver* resolver() { return &resolver_; }
SafepointTableBuilder* safepoints() { return &safepoints_; } SafepointTableBuilder* safepoints() { return &safepoints_; }
......
...@@ -179,6 +179,16 @@ class OperandGenerator { ...@@ -179,6 +179,16 @@ class OperandGenerator {
GetVReg(node))); GetVReg(node)));
} }
// Distinguishes a plain register use from a unique-register use. A unique
// register is requested when the operand must not alias any other register
// operand of the same instruction (needed on TSAN builds, which require
// extra scratch registers that could otherwise alias the inputs).
enum class RegisterUseKind { kUseRegister, kUseUniqueRegister };
// Returns a register operand for {node}, dispatching on {unique_reg} to
// either the ordinary or the unique-register variant. The common
// (non-unique) case is marked V8_LIKELY since unique registers are only
// requested on TSAN builds.
InstructionOperand UseRegister(Node* node, RegisterUseKind unique_reg) {
if (V8_LIKELY(unique_reg == RegisterUseKind::kUseRegister)) {
return UseRegister(node);
} else {
DCHECK_EQ(unique_reg, RegisterUseKind::kUseUniqueRegister);
return UseUniqueRegister(node);
}
}
InstructionOperand UseFixed(Node* node, Register reg) { InstructionOperand UseFixed(Node* node, Register reg) {
return Use(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, return Use(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
reg.code(), GetVReg(node))); reg.code(), GetVReg(node)));
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include "src/compiler/node-matchers.h" #include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h" #include "src/compiler/osr.h"
#include "src/heap/memory-chunk.h" #include "src/heap/memory-chunk.h"
#include "src/objects/code-kind.h"
#include "src/objects/smi.h" #include "src/objects/smi.h"
#if V8_ENABLE_WEBASSEMBLY #if V8_ENABLE_WEBASSEMBLY
...@@ -376,6 +377,13 @@ void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen, ...@@ -376,6 +377,13 @@ void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand, TurboAssembler* tasm, Operand operand,
Register value_reg, X64OperandConverter& i, Register value_reg, X64OperandConverter& i,
StubCallMode mode, int size) { StubCallMode mode, int size) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
// path. It is not crucial, but it would be nice to remove this if.
if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
Register scratch0 = i.TempRegister(0); Register scratch0 = i.TempRegister(0);
auto tsan_ool = zone->New<OutOfLineTSANRelaxedStore>( auto tsan_ool = zone->New<OutOfLineTSANRelaxedStore>(
codegen, operand, value_reg, scratch0, mode, size); codegen, operand, value_reg, scratch0, mode, size);
...@@ -387,6 +395,13 @@ void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen, ...@@ -387,6 +395,13 @@ void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
TurboAssembler* tasm, Operand operand, TurboAssembler* tasm, Operand operand,
Immediate value, X64OperandConverter& i, Immediate value, X64OperandConverter& i,
StubCallMode mode, int size) { StubCallMode mode, int size) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
// path. It is not crucial, but it would be nice to remove this if.
if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
Register value_reg = i.TempRegister(1); Register value_reg = i.TempRegister(1);
tasm->movq(value_reg, value); tasm->movq(value_reg, value);
EmitTSANStoreOOLIfNeeded(zone, codegen, tasm, operand, value_reg, i, mode, EmitTSANStoreOOLIfNeeded(zone, codegen, tasm, operand, value_reg, i, mode,
...@@ -1269,9 +1284,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1269,9 +1284,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index); Operand operand = i.MemoryOperand(&index);
Register value = i.InputRegister(index); Register value = i.InputRegister(index);
Register scratch0 = i.TempRegister(0); Register scratch0 = i.TempRegister(0);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kTaggedSize);
Register scratch1 = i.TempRegister(1); Register scratch1 = i.TempRegister(1);
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value, auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode, scratch0, scratch1, mode,
...@@ -1284,6 +1296,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1284,6 +1296,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemoryChunk::kPointersFromHereAreInterestingMask, MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry()); not_zero, ool->entry());
__ bind(ool->exit()); __ bind(ool->exit());
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kTaggedSize);
break; break;
} }
case kArchWordPoisonOnSpeculation: case kArchWordPoisonOnSpeculation:
...@@ -2180,9 +2194,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2180,9 +2194,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0; size_t index = 0;
Operand operand = i.MemoryOperand(&index); Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) { if (HasImmediateInput(instr, index)) {
__ movl(operand, i.InputImmediate(index)); Immediate value = i.InputImmediate(index);
__ movl(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt32Size);
} else { } else {
__ movl(operand, i.InputRegister(index)); Register value = i.InputRegister(index);
__ movl(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt32Size);
} }
} }
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
...@@ -2216,14 +2236,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2216,14 +2236,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index); Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) { if (HasImmediateInput(instr, index)) {
Immediate value = i.InputImmediate(index); Immediate value = i.InputImmediate(index);
__ StoreTaggedField(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i, EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kTaggedSize); DetermineStubCallMode(), kTaggedSize);
__ StoreTaggedField(operand, value);
} else { } else {
Register value = i.InputRegister(index); Register value = i.InputRegister(index);
__ StoreTaggedField(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i, EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kTaggedSize); DetermineStubCallMode(), kTaggedSize);
__ StoreTaggedField(operand, value);
} }
break; break;
} }
...@@ -2235,9 +2255,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2235,9 +2255,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0; size_t index = 0;
Operand operand = i.MemoryOperand(&index); Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) { if (HasImmediateInput(instr, index)) {
__ movq(operand, i.InputImmediate(index)); Immediate value = i.InputImmediate(index);
__ movq(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt64Size);
} else { } else {
__ movq(operand, i.InputRegister(index)); Register value = i.InputRegister(index);
__ movq(operand, value);
EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
DetermineStubCallMode(), kInt64Size);
} }
} }
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
......
...@@ -123,11 +123,11 @@ class X64OperandGenerator final : public OperandGenerator { ...@@ -123,11 +123,11 @@ class X64OperandGenerator final : public OperandGenerator {
return false; return false;
} }
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent, AddressingMode GenerateMemoryOperandInputs(
Node* base, Node* displacement, Node* index, int scale_exponent, Node* base, Node* displacement,
DisplacementMode displacement_mode, DisplacementMode displacement_mode, InstructionOperand inputs[],
InstructionOperand inputs[], size_t* input_count,
size_t* input_count) { RegisterUseKind reg_kind = RegisterUseKind::kUseRegister) {
AddressingMode mode = kMode_MRI; AddressingMode mode = kMode_MRI;
if (base != nullptr && (index != nullptr || displacement != nullptr)) { if (base != nullptr && (index != nullptr || displacement != nullptr)) {
if (base->opcode() == IrOpcode::kInt32Constant && if (base->opcode() == IrOpcode::kInt32Constant &&
...@@ -139,10 +139,10 @@ class X64OperandGenerator final : public OperandGenerator { ...@@ -139,10 +139,10 @@ class X64OperandGenerator final : public OperandGenerator {
} }
} }
if (base != nullptr) { if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base); inputs[(*input_count)++] = UseRegister(base, reg_kind);
if (index != nullptr) { if (index != nullptr) {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3); DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index); inputs[(*input_count)++] = UseRegister(index, reg_kind);
if (displacement != nullptr) { if (displacement != nullptr) {
inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
? UseNegatedImmediate(displacement) ? UseNegatedImmediate(displacement)
...@@ -169,10 +169,10 @@ class X64OperandGenerator final : public OperandGenerator { ...@@ -169,10 +169,10 @@ class X64OperandGenerator final : public OperandGenerator {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3); DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
if (displacement != nullptr) { if (displacement != nullptr) {
if (index == nullptr) { if (index == nullptr) {
inputs[(*input_count)++] = UseRegister(displacement); inputs[(*input_count)++] = UseRegister(displacement, reg_kind);
mode = kMode_MR; mode = kMode_MR;
} else { } else {
inputs[(*input_count)++] = UseRegister(index); inputs[(*input_count)++] = UseRegister(index, reg_kind);
inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
? UseNegatedImmediate(displacement) ? UseNegatedImmediate(displacement)
: UseImmediate(displacement); : UseImmediate(displacement);
...@@ -181,22 +181,22 @@ class X64OperandGenerator final : public OperandGenerator { ...@@ -181,22 +181,22 @@ class X64OperandGenerator final : public OperandGenerator {
mode = kMnI_modes[scale_exponent]; mode = kMnI_modes[scale_exponent];
} }
} else { } else {
inputs[(*input_count)++] = UseRegister(index); inputs[(*input_count)++] = UseRegister(index, reg_kind);
static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1, static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
kMode_M4, kMode_M8}; kMode_M4, kMode_M8};
mode = kMn_modes[scale_exponent]; mode = kMn_modes[scale_exponent];
if (mode == kMode_MR1) { if (mode == kMode_MR1) {
// [%r1 + %r1*1] has a smaller encoding than [%r1*2+0] // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
inputs[(*input_count)++] = UseRegister(index); inputs[(*input_count)++] = UseRegister(index, reg_kind);
} }
} }
} }
return mode; return mode;
} }
AddressingMode GetEffectiveAddressMemoryOperand(Node* operand, AddressingMode GetEffectiveAddressMemoryOperand(
InstructionOperand inputs[], Node* operand, InstructionOperand inputs[], size_t* input_count,
size_t* input_count) { RegisterUseKind reg_kind = RegisterUseKind::kUseRegister) {
{ {
LoadMatcher<ExternalReferenceMatcher> m(operand); LoadMatcher<ExternalReferenceMatcher> m(operand);
if (m.index().HasResolvedValue() && m.object().HasResolvedValue() && if (m.index().HasResolvedValue() && m.object().HasResolvedValue() &&
...@@ -217,7 +217,7 @@ class X64OperandGenerator final : public OperandGenerator { ...@@ -217,7 +217,7 @@ class X64OperandGenerator final : public OperandGenerator {
if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) { if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
return GenerateMemoryOperandInputs( return GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(), m.index(), m.scale(), m.base(), m.displacement(),
m.displacement_mode(), inputs, input_count); m.displacement_mode(), inputs, input_count, reg_kind);
} else if (m.base() == nullptr && } else if (m.base() == nullptr &&
m.displacement_mode() == kPositiveDisplacement) { m.displacement_mode() == kPositiveDisplacement) {
// The displacement cannot be an immediate, but we can use the // The displacement cannot be an immediate, but we can use the
...@@ -225,10 +225,10 @@ class X64OperandGenerator final : public OperandGenerator { ...@@ -225,10 +225,10 @@ class X64OperandGenerator final : public OperandGenerator {
// modes for the scale. // modes for the scale.
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(), return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(),
nullptr, m.displacement_mode(), inputs, nullptr, m.displacement_mode(), inputs,
input_count); input_count, reg_kind);
} else { } else {
inputs[(*input_count)++] = UseRegister(operand->InputAt(0)); inputs[(*input_count)++] = UseRegister(operand->InputAt(0), reg_kind);
inputs[(*input_count)++] = UseRegister(operand->InputAt(1)); inputs[(*input_count)++] = UseRegister(operand->InputAt(1), reg_kind);
return kMode_MR1; return kMode_MR1;
} }
} }
...@@ -515,26 +515,23 @@ void InstructionSelector::VisitStore(Node* node) { ...@@ -515,26 +515,23 @@ void InstructionSelector::VisitStore(Node* node) {
// On TSAN builds we require two scratch registers. Because of this we also // On TSAN builds we require two scratch registers. Because of this we also
// have to modify the inputs to take into account possible aliasing and use // have to modify the inputs to take into account possible aliasing and use
// UseUniqueRegister which is not required for non-TSAN builds. // UseUniqueRegister which is not required for non-TSAN builds.
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode),
g.CanBeImmediate(value) ? g.UseImmediate(value)
: g.UseUniqueRegister(value)};
size_t input_count = arraysize(inputs);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t temp_count = arraysize(temps); size_t temp_count = arraysize(temps);
auto reg_kind = OperandGenerator::RegisterUseKind::kUseUniqueRegister;
#else #else
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
InstructionOperand* temps = nullptr; InstructionOperand* temps = nullptr;
size_t temp_count = 0; size_t temp_count = 0;
auto reg_kind = OperandGenerator::RegisterUseKind::kUseRegister;
#endif // V8_IS_TSAN #endif // V8_IS_TSAN
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
node, inputs, &input_count, reg_kind);
InstructionOperand value_operand = g.CanBeImmediate(value)
? g.UseImmediate(value)
: g.UseRegister(value, reg_kind);
inputs[input_count++] = value_operand;
ArchOpcode opcode = GetStoreOpcode(store_rep); ArchOpcode opcode = GetStoreOpcode(store_rep);
InstructionCode code = InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode); opcode | AddressingModeField::encode(addressing_mode);
...@@ -546,20 +543,34 @@ void InstructionSelector::VisitStore(Node* node) { ...@@ -546,20 +543,34 @@ void InstructionSelector::VisitStore(Node* node) {
void InstructionSelector::VisitProtectedStore(Node* node) { void InstructionSelector::VisitProtectedStore(Node* node) {
X64OperandGenerator g(this); X64OperandGenerator g(this);
Node* value = node->InputAt(2); Node* value = node->InputAt(2);
StoreRepresentation store_rep = StoreRepresentationOf(node->op()); StoreRepresentation store_rep = StoreRepresentationOf(node->op());
ArchOpcode opcode = GetStoreOpcode(store_rep); #ifdef V8_IS_TSAN
// On TSAN builds we require two scratch registers. Because of this we also
// have to modify the inputs to take into account possible aliasing and use
// UseUniqueRegister which is not required for non-TSAN builds.
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t temp_count = arraysize(temps);
auto reg_kind = OperandGenerator::RegisterUseKind::kUseUniqueRegister;
#else
InstructionOperand* temps = nullptr;
size_t temp_count = 0;
auto reg_kind = OperandGenerator::RegisterUseKind::kUseRegister;
#endif // V8_IS_TSAN
InstructionOperand inputs[4]; InstructionOperand inputs[4];
size_t input_count = 0; size_t input_count = 0;
AddressingMode addressing_mode = AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count, reg_kind);
InstructionOperand value_operand = g.CanBeImmediate(value)
? g.UseImmediate(value)
: g.UseRegister(value, reg_kind);
inputs[input_count++] = value_operand;
ArchOpcode opcode = GetStoreOpcode(store_rep);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) | InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
AccessModeField::encode(kMemoryAccessProtected); AccessModeField::encode(kMemoryAccessProtected);
InstructionOperand value_operand = Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs,
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value); temp_count, temps);
inputs[input_count++] = value_operand;
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
} }
// Architecture supports unaligned access, therefore VisitLoad is used instead // Architecture supports unaligned access, therefore VisitLoad is used instead
......
...@@ -105,6 +105,12 @@ bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) { ...@@ -105,6 +105,12 @@ bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
case Builtin::kRecordWriteOmitRememberedSetSaveFP: case Builtin::kRecordWriteOmitRememberedSetSaveFP:
case Builtin::kRecordWriteEmitRememberedSetIgnoreFP: case Builtin::kRecordWriteEmitRememberedSetIgnoreFP:
case Builtin::kRecordWriteOmitRememberedSetIgnoreFP: case Builtin::kRecordWriteOmitRememberedSetIgnoreFP:
#ifdef V8_IS_TSAN
case Builtin::kTSANRelaxedStore32IgnoreFP:
case Builtin::kTSANRelaxedStore32SaveFP:
case Builtin::kTSANRelaxedStore64IgnoreFP:
case Builtin::kTSANRelaxedStore64SaveFP:
#endif // V8_IS_TSAN
return false; return false;
default: default:
return true; return true;
......
...@@ -5,6 +5,8 @@ ...@@ -5,6 +5,8 @@
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
#include <atomic>
#include "src/api/api-inl.h" #include "src/api/api-inl.h"
#include "src/objects/objects-inl.h" #include "src/objects/objects-inl.h"
#include "src/snapshot/code-serializer.h" #include "src/snapshot/code-serializer.h"
...@@ -15,7 +17,6 @@ ...@@ -15,7 +17,6 @@
#include "src/wasm/wasm-module.h" #include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h" #include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h" #include "src/wasm/wasm-opcodes.h"
#include "test/cctest/cctest.h" #include "test/cctest/cctest.h"
#include "test/common/wasm/flag-utils.h" #include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h" #include "test/common/wasm/test-signatures.h"
...@@ -499,7 +500,7 @@ TEST(MemoryGrowZero) { ...@@ -499,7 +500,7 @@ TEST(MemoryGrowZero) {
class InterruptThread : public v8::base::Thread { class InterruptThread : public v8::base::Thread {
public: public:
explicit InterruptThread(Isolate* isolate, int32_t* memory) explicit InterruptThread(Isolate* isolate, std::atomic<int32_t>* memory)
: Thread(Options("TestInterruptLoop")), : Thread(Options("TestInterruptLoop")),
isolate_(isolate), isolate_(isolate),
memory_(memory) {} memory_(memory) {}
...@@ -515,14 +516,14 @@ class InterruptThread : public v8::base::Thread { ...@@ -515,14 +516,14 @@ class InterruptThread : public v8::base::Thread {
// Wait for the main thread to write the signal value. // Wait for the main thread to write the signal value.
int32_t val = 0; int32_t val = 0;
do { do {
val = memory_[0]; val = memory_[0].load(std::memory_order_relaxed);
val = ReadLittleEndianValue<int32_t>(reinterpret_cast<Address>(&val)); val = ReadLittleEndianValue<int32_t>(reinterpret_cast<Address>(&val));
} while (val != signal_value_); } while (val != signal_value_);
isolate_->RequestInterrupt(&OnInterrupt, const_cast<int32_t*>(memory_)); isolate_->RequestInterrupt(&OnInterrupt, memory_);
} }
Isolate* isolate_; Isolate* isolate_;
volatile int32_t* memory_; std::atomic<int32_t>* memory_;
static const int32_t interrupt_location_ = 10; static const int32_t interrupt_location_ = 10;
static const int32_t interrupt_value_ = 154; static const int32_t interrupt_value_ = 154;
static const int32_t signal_value_ = 1221; static const int32_t signal_value_ = 1221;
...@@ -576,7 +577,8 @@ TEST(TestInterruptLoop) { ...@@ -576,7 +577,8 @@ TEST(TestInterruptLoop) {
Handle<JSArrayBuffer> memory(instance->memory_object().array_buffer(), Handle<JSArrayBuffer> memory(instance->memory_object().array_buffer(),
isolate); isolate);
int32_t* memory_array = reinterpret_cast<int32_t*>(memory->backing_store()); std::atomic<int32_t>* memory_array =
reinterpret_cast<std::atomic<int32_t>*>(memory->backing_store());
InterruptThread thread(isolate, memory_array); InterruptThread thread(isolate, memory_array);
CHECK(thread.Start()); CHECK(thread.Start());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.