Commit 81cb8411 authored by binji, committed by Commit bot

[Atomics] Make Atomics.store a builtin using TF

BUG=v8:4614
R=bmeurer@chromium.org,jarin@chromium.org
LOG=n

Review-Url: https://codereview.chromium.org/1938213002
Cr-Commit-Position: refs/heads/master@{#35993}
parent d622c3a8
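
This commit replaces the self-hosted JavaScript implementation of Atomics.store (and its Runtime_AtomicsStore fallback) with a TurboFan builtin generated through the CodeStubAssembler, following the pattern already used for Atomics.load. For reference, a minimal sketch of the observable semantics the builtin implements (illustration only, not part of the commit; assumes a shell with shared memory enabled, e.g. d8 with --harmony-sharedarraybuffer):

  var i32 = new Int32Array(new SharedArrayBuffer(4));
  // The value argument is converted with ToInteger; the element written
  // is that result truncated to the array's element type.
  var r = Atomics.store(i32, 0, 5.7);
  // r === 5 and i32[0] === 5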
@@ -2500,6 +2500,8 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("load"),
Builtins::kAtomicsLoad, 2, true);
SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("store"),
Builtins::kAtomicsStore, 3, true);
}
...
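
The bootstrapper hunk above installs the builtin on the Atomics object with three formal parameters, so the installed function reports the expected arity (a trivial check, for illustration only):

  // (typedArray, index, value)
  assertEquals(3, Atomics.store.length);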
@@ -5376,6 +5376,58 @@ void Builtins::Generate_AtomicsLoad(CodeStubAssembler* a) {
a->Return(a->Int32Constant(0));
}
void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
using namespace compiler;
Node* array = a->Parameter(1);
Node* index = a->Parameter(2);
Node* value = a->Parameter(3);
Node* context = a->Parameter(4 + 2);
Node* instance_type;
Node* backing_store;
ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
Node* array_length_word32 = a->TruncateTaggedToWord32(
context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
ValidateAtomicIndex(a, index_word32, array_length_word32, context);
Node* index_word = a->ChangeUint32ToWord(index_word32);
Callable to_integer = CodeFactory::ToInteger(a->isolate());
Node* value_integer = a->CallStub(to_integer, context, value);
Node* value_word32 = a->TruncateTaggedToWord32(context, value_integer);
CodeStubAssembler::Label u8(a), u16(a), u32(a), other(a);
int32_t case_values[] = {
FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
};
CodeStubAssembler::Label* case_labels[] = {
&u8, &u8, &u16, &u16, &u32, &u32,
};
a->Switch(instance_type, &other, case_values, case_labels,
arraysize(case_labels));
a->Bind(&u8);
a->AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
value_word32);
a->Return(value_integer);
a->Bind(&u16);
a->AtomicStore(MachineRepresentation::kWord16, backing_store,
a->WordShl(index_word, 1), value_word32);
a->Return(value_integer);
a->Bind(&u32);
a->AtomicStore(MachineRepresentation::kWord32, backing_store,
a->WordShl(index_word, 2), value_word32);
a->Return(value_integer);
// This shouldn't happen; we've already validated the type.
a->Bind(&other);
a->Return(a->Int32Constant(0));
}
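
Generate_AtomicsStore validates the array and index, converts the value with ToInteger, truncates it to word32, and then switches on the array's instance type so that 8-, 16-, and 32-bit element stores share one builtin; every arm returns the ToInteger result, not the truncated element. A sketch of the resulting behavior (illustration only):

  var i8 = new Int8Array(new SharedArrayBuffer(8));
  var r = Atomics.store(i8, 0, 0x1234);
  // r === 0x1234: the full ToInteger result is returned.
  // i8[0] === 0x34: only the low 8 bits reach the backing store.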
#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
Handle<Code> Builtins::name() { \
Code** code_address = \
...
@@ -322,7 +322,8 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(ArrayIsArray, 2) \
V(StringPrototypeCharAt, 2) \
V(StringPrototypeCharCodeAt, 2) \
V(AtomicsLoad, 3) \
V(AtomicsStore, 4)
// Define list of builtin handlers implemented in assembly.
#define BUILTIN_LIST_H(V) \
@@ -681,6 +682,7 @@ class Builtins {
static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);
static void Generate_AtomicsLoad(CodeStubAssembler* assembler);
static void Generate_AtomicsStore(CodeStubAssembler* assembler);
static void InitBuiltinFunctionTable();
...
@@ -395,6 +395,14 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ dmb(ISH); \
} while (0)
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
do { \
__ dmb(ISH); \
__ asm_instr(i.InputRegister(2), \
MemOperand(i.InputRegister(0), i.InputRegister(1))); \
__ dmb(ISH); \
} while (0)
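
On ARM (and in the analogous ARM64 macro below) the plain store is bracketed by full data memory barriers, the conservative way to make the store sequentially consistent; this change does not use release-store instructions such as ARM64's stlr.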
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
@@ -1274,6 +1282,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;
case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
break;
case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
break;
case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(str);
break;
}
return kSuccess;
} // NOLINT(readability/fn_size)
...
@@ -1835,6 +1835,38 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
void InstructionSelector::VisitAtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
return;
}
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 0, nullptr, input_count, inputs);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
...
@@ -479,6 +479,14 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ Dmb(InnerShareable, BarrierAll); \
} while (0)
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
do { \
__ Dmb(InnerShareable, BarrierAll); \
__ asm_instr(i.InputRegister(2), \
MemOperand(i.InputRegister(0), i.InputRegister(1))); \
__ Dmb(InnerShareable, BarrierAll); \
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
@@ -1431,6 +1439,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand(i.InputRegister(0), i.InputRegister(1)));
__ Dmb(InnerShareable, BarrierAll);
break;
case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Strb);
break;
case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Strh);
break;
case kAtomicStoreWord32:
__ Dmb(InnerShareable, BarrierAll);
__ Str(i.InputRegister32(2),
MemOperand(i.InputRegister(0), i.InputRegister(1)));
__ Dmb(InnerShareable, BarrierAll);
break;
}
return kSuccess;
} // NOLINT(readability/fn_size)
...
@@ -2267,6 +2267,38 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
void InstructionSelector::VisitAtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
return;
}
AddressingMode addressing_mode = kMode_MRR;
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 0, nullptr, input_count, inputs);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
...
@@ -234,6 +234,11 @@ Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
}
Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
Node* index, Node* value) {
return raw_assembler_->AtomicStore(rep, base, index, value);
}
Node* CodeAssembler::Projection(int index, Node* value) {
return raw_assembler_->Projection(index, value);
}
...
@@ -232,6 +232,8 @@ class CodeAssembler {
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
Node* value);
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
Node* value);
// Basic arithmetic operations.
#define DECLARE_CODE_ASSEMBLER_BINARY_OP(name) Node* name(Node* a, Node* b);
...
@@ -1280,6 +1280,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
case kIA32Xchgb: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ xchg_b(i.InputRegister(index), operand);
break;
}
case kIA32Xchgw: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ xchg_w(i.InputRegister(index), operand);
break;
}
case kIA32Xchgl: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ xchg(i.InputRegister(index), operand);
break;
}
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
@@ -1331,6 +1349,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicLoadInt16:
case kAtomicLoadUint16:
case kAtomicLoadWord32:
case kAtomicStoreWord8:
case kAtomicStoreWord16:
case kAtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
break;
}
...
@@ -113,7 +113,10 @@ namespace compiler {
V(IA32PushFloat32) \
V(IA32PushFloat64) \
V(IA32Poke) \
V(IA32StackCheck) \
V(IA32Xchgb) \
V(IA32Xchgw) \
V(IA32Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
...
@@ -127,6 +127,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Poke:
return kHasSideEffect;
case kIA32Xchgb:
case kIA32Xchgw:
case kIA32Xchgl:
return kIsLoadOperation | kHasSideEffect;
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
...
@@ -1576,6 +1576,44 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
VisitLoad(node);
}
void InstructionSelector::VisitAtomicStore(Node* node) {
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kIA32Xchgb;
break;
case MachineRepresentation::kWord16:
opcode = kIA32Xchgw;
break;
case MachineRepresentation::kWord32:
opcode = kIA32Xchgl;
break;
default:
UNREACHABLE();
break;
}
AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 0, nullptr, input_count, inputs);
}
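
On IA32 (and likewise on x64 below) no explicit fence is emitted: xchg with a memory operand is implicitly locked on x86, so the exchange itself acts as a sequentially consistent store. Because xchg also reads the old value, the scheduler hunks mark these opcodes kIsLoadOperation | kHasSideEffect even though the loaded result is discarded.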
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
...
@@ -83,7 +83,10 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(AtomicLoadUint8) \
V(AtomicLoadInt16) \
V(AtomicLoadUint16) \
V(AtomicLoadWord32) \
V(AtomicStoreWord8) \
V(AtomicStoreWord16) \
V(AtomicStoreWord32)
#define ARCH_OPCODE_LIST(V) \
COMMON_ARCH_OPCODE_LIST(V) \
...
@@ -277,6 +277,11 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kAtomicLoadWord32:
return kIsLoadOperation;
case kAtomicStoreWord8:
case kAtomicStoreWord16:
case kAtomicStoreWord32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
TARGET_ARCH_OPCODE_LIST(CASE)
#undef CASE
...
@@ -1188,6 +1188,8 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(type.representation(), node);
return VisitAtomicLoad(node);
}
case IrOpcode::kAtomicStore:
return VisitAtomicStore(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
...
@@ -63,6 +63,11 @@ MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
return OpParameter<MachineRepresentation>(op);
}
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kAtomicStore, op->opcode());
return OpParameter<MachineRepresentation>(op);
}
#define PURE_OP_LIST(V) \
V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -400,6 +405,11 @@ MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
V(Int32) \
V(Uint32)
#define ATOMIC_REPRESENTATION_LIST(V) \
V(kWord8) \
V(kWord16) \
V(kWord32)
struct MachineOperatorGlobalCache {
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
@@ -491,7 +501,7 @@ struct MachineOperatorGlobalCache {
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
#define ATOMIC_LOAD(Type) \
struct AtomicLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
AtomicLoad##Type##Operator() \
@@ -500,8 +510,20 @@ struct MachineOperatorGlobalCache {
"AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
AtomicLoad##Type##Operator kAtomicLoad##Type;
ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_STORE(Type) \
struct AtomicStore##Type##Operator \
: public Operator1<MachineRepresentation> { \
AtomicStore##Type##Operator() \
: Operator1<MachineRepresentation>( \
IrOpcode::kAtomicStore, Operator::kNoRead | Operator::kNoThrow, \
"AtomicStore", 3, 1, 1, 0, 1, 0, MachineRepresentation::Type) {} \
}; \
AtomicStore##Type##Operator kAtomicStore##Type;
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
};
@@ -635,6 +657,17 @@ const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
#define STORE(kRep) \
if (rep == MachineRepresentation::kRep) { \
return &cache_.kAtomicStore##kRep; \
}
ATOMIC_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
return nullptr;
}
} // namespace compiler
} // namespace internal
} // namespace v8
@@ -78,6 +78,8 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
MachineRepresentation StackSlotRepresentationOf(Operator const* op);
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -501,6 +503,8 @@ class MachineOperatorBuilder final : public ZoneObject {
// atomic-load [base + index]
const Operator* AtomicLoad(LoadRepresentation rep);
// atomic-store [base + index], value
const Operator* AtomicStore(MachineRepresentation rep);
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == MachineRepresentation::kWord32; }
...
@@ -1346,6 +1346,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
break;
case kAtomicStoreWord8:
case kAtomicStoreWord16:
case kAtomicStoreWord32:
// TODO(binji): implement
__ nop();
break;
}
return kSuccess;
} // NOLINT(readability/fn_size)
...
@@ -1481,6 +1481,43 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
}
}
void InstructionSelector::VisitAtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
return;
}
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index),
g.UseRegister(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0),
g.UseRegister(value));
}
}
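
Note that on MIPS the instruction selector now classifies atomic stores, but the code-generator hunk above only emits a nop for the kAtomicStoreWord* opcodes (see the TODO), so the store is not actually atomic on this port yet; the mips64 hunks below mirror the same placeholder.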
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
...
@@ -1596,6 +1596,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
break;
case kAtomicStoreWord8:
case kAtomicStoreWord16:
case kAtomicStoreWord32:
// TODO(binji): implement
__ nop();
break;
}
return kSuccess;
} // NOLINT(readability/fn_size)
...
@@ -1992,6 +1992,43 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
}
}
void InstructionSelector::VisitAtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
return;
}
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index),
g.UseRegister(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0),
g.UseRegister(value));
}
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
...
@@ -357,7 +357,8 @@
V(Word32PairShl) \
V(Word32PairShr) \
V(Word32PairSar) \
V(AtomicLoad) \
V(AtomicStore)
#define MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
V(CreateFloat32x4) \
...
@@ -137,6 +137,10 @@ class RawMachineAssembler {
Node* AtomicLoad(MachineType rep, Node* base, Node* index) {
return AddNode(machine()->AtomicLoad(rep), base, index);
}
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
Node* value) {
return AddNode(machine()->AtomicStore(rep), base, index, value);
}
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
...
@@ -2544,9 +2544,14 @@ Type* Typer::Visitor::TypeLoadParentFramePointer(Node* node) {
Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeCheckedStore(Node* node) {
UNREACHABLE();
return nullptr;
}
Type* Typer::Visitor::TypeAtomicLoad(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeAtomicStore(Node* node) {
UNREACHABLE();
return nullptr;
}
...
@@ -1032,6 +1032,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
case IrOpcode::kAtomicLoad:
case IrOpcode::kAtomicStore:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
...
@@ -1718,6 +1718,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
case kX64Xchgb: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ xchgb(i.InputRegister(index), operand);
break;
}
case kX64Xchgw: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ xchgw(i.InputRegister(index), operand);
break;
}
case kX64Xchgl: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ xchgl(i.InputRegister(index), operand);
break;
}
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
@@ -1768,6 +1786,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicLoadInt16:
case kAtomicLoadUint16:
case kAtomicLoadWord32:
case kAtomicStoreWord8:
case kAtomicStoreWord16:
case kAtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
break;
}
...
@@ -141,7 +141,10 @@ namespace compiler {
V(X64Inc32) \
V(X64Push) \
V(X64Poke) \
V(X64StackCheck) \
V(X64Xchgb) \
V(X64Xchgw) \
V(X64Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
...
@@ -168,6 +168,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Poke:
return kHasSideEffect;
case kX64Xchgb:
case kX64Xchgw:
case kX64Xchgl:
return kIsLoadOperation | kHasSideEffect;
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
...
@@ -2029,6 +2029,44 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
VisitLoad(node);
}
void InstructionSelector::VisitAtomicStore(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kX64Xchgb;
break;
case MachineRepresentation::kWord16:
opcode = kX64Xchgw;
break;
case MachineRepresentation::kWord32:
opcode = kX64Xchgl;
break;
default:
UNREACHABLE();
return;
}
AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
...
@@ -681,6 +681,18 @@ void Assembler::xchg(Register dst, const Operand& src) {
emit_operand(dst, src);
}
void Assembler::xchg_b(Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
EMIT(0x86);
emit_operand(reg, op);
}
void Assembler::xchg_w(Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x87);
emit_operand(reg, op);
}
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
...
@@ -655,6 +655,8 @@ class Assembler : public AssemblerBase {
// Exchange
void xchg(Register dst, Register src);
void xchg(Register dst, const Operand& src);
void xchg_b(Register reg, const Operand& op);
void xchg_w(Register reg, const Operand& op);
// Arithmetics
void adc(Register dst, int32_t imm32);
...
@@ -62,13 +62,6 @@ function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
return %_AtomicsCompareExchange(sta, index, oldValue, newValue);
}
function AtomicsStoreJS(sta, index, value) {
CheckSharedIntegerTypedArray(sta);
index = ValidateIndex(index, %_TypedArrayGetLength(sta));
value = TO_NUMBER(value);
return %_AtomicsStore(sta, index, value);
}
function AtomicsAddJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
index = ValidateIndex(index, %_TypedArrayGetLength(ia));
@@ -172,7 +165,6 @@ utils.InstallFunctions(Atomics, DONT_ENUM, [
// TODO(binji): remove the rest of the (non futex) Atomics functions as they
// become builtins.
"compareExchange", AtomicsCompareExchangeJS,
"store", AtomicsStoreJS,
"add", AtomicsAddJS, "add", AtomicsAddJS,
"sub", AtomicsSubJS, "sub", AtomicsSubJS,
"and", AtomicsAndJS, "and", AtomicsAndJS,
......
@@ -32,11 +32,6 @@ inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
return oldval;
}
template <typename T>
inline void StoreSeqCst(T* p, T value) {
__atomic_store_n(p, value, __ATOMIC_SEQ_CST);
}
template <typename T>
inline T AddSeqCst(T* p, T value) {
return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
@@ -109,10 +104,6 @@ inline T ExchangeSeqCst(T* p, T value) {
return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(newval), \
bit_cast<vctype>(oldval)); \
} \
inline void StoreSeqCst(type* p, type value) { \
InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
}
ATOMIC_OPS(int8_t, 8, char)
@@ -207,15 +198,6 @@ inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
}
template <typename T>
inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
T value = FromObject<T>(obj);
StoreSeqCst(static_cast<T*>(buffer) + index, value);
return *obj;
}
template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
@@ -292,15 +274,6 @@ inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
}
inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
typedef int32_t convert_type;
uint8_t value = ClampToUint8(FromObject<convert_type>(obj));
StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value);
return *obj;
}
#define DO_UINT8_CLAMPED_OP(name, op) \
inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer, \
size_t index, Handle<Object> obj) { \
@@ -408,38 +381,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
}
RUNTIME_FUNCTION(Runtime_AtomicsStore) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
return DoStore<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
return DoStoreUint8Clamped(isolate, source, index, value);
default:
break;
}
UNREACHABLE();
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
...
@@ -59,7 +59,6 @@ namespace internal {
F(ThrowNotInt32SharedTypedArrayError, 1, 1) \
F(ThrowInvalidAtomicAccessIndexError, 0, 1) \
F(AtomicsCompareExchange, 4, 1) \
F(AtomicsStore, 3, 1) \
F(AtomicsAdd, 3, 1) \
F(AtomicsSub, 3, 1) \
F(AtomicsAnd, 3, 1) \
...
@@ -1908,6 +1908,25 @@ void Assembler::shrd(Register dst, Register src) {
emit_modrm(src, dst);
}
void Assembler::xchgb(Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
if (!reg.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
emit_rex_32(reg, op);
} else {
emit_optional_rex_32(reg, op);
}
emit(0x86);
emit_operand(reg, op);
}
void Assembler::xchgw(Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg, op);
emit(0x87);
emit_operand(reg, op);
}
void Assembler::emit_xchg(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
...
@@ -784,6 +784,9 @@ class Assembler : public AssemblerBase {
void decb(Register dst);
void decb(const Operand& dst);
void xchgb(Register reg, const Operand& op);
void xchgw(Register reg, const Operand& op);
// Sign-extends rax into rdx:rax.
void cqo();
// Sign-extends eax into edx:eax.
...
@@ -421,7 +421,7 @@ function clearArray(sab) {
assertEquals(50, Atomics.compareExchange(sta, 0, v, v), name);
// Store
assertEquals(v|0, Atomics.store(sta, 0, v), name);
assertEquals(v|0, sta[0], name);
// Add
...
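
The test's expected return value changes from +v (the ToNumber result that the removed JS implementation returned) to v|0, matching the builtin's ToInteger conversion for the values this test uses. For example (illustration only):

  // Old self-hosted path: Atomics.store(sta, 0, 5.5) returned 5.5.
  // New builtin: it returns 5, since ToInteger(5.5) === 5 === (5.5 | 0).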