Commit 82b5c8c9 authored by aseemgarg, committed by Commit bot

[Atomics] Make Atomics.compareExchange a builtin using TF

BUG=v8:4614
R=binji@chromium.org

Review-Url: https://codereview.chromium.org/2649703002
Cr-Commit-Position: refs/heads/master@{#43878}
parent 469b632e
......@@ -315,6 +315,88 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
AtomicExchange(MachineType::Uint32(), backing_store,
WordShl(index_word, 2), value_word32)));
// This shouldn't happen, we've already validated the type.
Bind(&other);
Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC
}
TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
Node* array = Parameter(1);
Node* index = Parameter(2);
Node* old_value = Parameter(3);
Node* new_value = Parameter(4);
Node* context = Parameter(5 + 2);
Node* instance_type;
Node* backing_store;
ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
Node* index_integer;
Node* index_word32 =
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
Node* array_length_word32 = TruncateTaggedToWord32(
context, LoadObjectField(array, JSTypedArray::kLengthOffset));
ValidateAtomicIndex(index_word32, array_length_word32, context);
Node* old_value_integer = ToInteger(context, old_value);
Node* new_value_integer = ToInteger(context, new_value);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
index_integer, old_value_integer, new_value_integer));
#else
Node* index_word = ChangeUint32ToWord(index_word32);
Node* old_value_word32 = TruncateTaggedToWord32(context, old_value_integer);
Node* new_value_word32 = TruncateTaggedToWord32(context, new_value_integer);
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
other(this);
int32_t case_values[] = {
FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
};
Label* case_labels[] = {
&i8, &u8, &i16, &u16, &i32, &u32,
};
Switch(instance_type, &other, case_values, case_labels,
arraysize(case_labels));
Bind(&i8);
Return(SmiFromWord32(AtomicCompareExchange(MachineType::Int8(), backing_store,
index_word, old_value_word32,
new_value_word32)));
Bind(&u8);
Return(SmiFromWord32(
AtomicCompareExchange(MachineType::Uint8(), backing_store, index_word,
old_value_word32, new_value_word32)));
Bind(&i16);
Return(SmiFromWord32(AtomicCompareExchange(
MachineType::Int16(), backing_store, WordShl(index_word, 1),
old_value_word32, new_value_word32)));
Bind(&u16);
Return(SmiFromWord32(AtomicCompareExchange(
MachineType::Uint16(), backing_store, WordShl(index_word, 1),
old_value_word32, new_value_word32)));
Bind(&i32);
Return(ChangeInt32ToTagged(AtomicCompareExchange(
MachineType::Int32(), backing_store, WordShl(index_word, 2),
old_value_word32, new_value_word32)));
Bind(&u32);
Return(ChangeUint32ToTagged(AtomicCompareExchange(
MachineType::Uint32(), backing_store, WordShl(index_word, 2),
old_value_word32, new_value_word32)));
// This shouldn't happen, we've already validated the type.
Bind(&other);
Unreachable();
......
......@@ -185,13 +185,6 @@ namespace {
#if V8_CC_GNU
// Strong sequentially consistent compare-exchange: stores |newval| into *p
// iff *p currently equals |oldval|. Returns the value *p held before the
// operation (equal to |oldval| exactly when the exchange succeeded).
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  // On failure the builtin writes the observed value back into |oldval|, so
  // returning |oldval| always yields the pre-operation value.
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}
template <typename T>
inline T AddSeqCst(T* p, T value) {
return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
......@@ -219,14 +212,11 @@ inline T XorSeqCst(T* p, T value) {
#elif V8_CC_MSVC
#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedExchange32 _InterlockedExchange
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define InterlockedAnd32 _InterlockedAnd
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
#define ATOMIC_OPS(type, suffix, vctype) \
......@@ -249,11 +239,6 @@ inline T XorSeqCst(T* p, T value) {
inline type XorSeqCst(type* p, type value) { \
return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(value)); \
} \
inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \
return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(newval), \
bit_cast<vctype>(oldval)); \
}
ATOMIC_OPS(int8_t, 8, char)
......@@ -266,14 +251,11 @@ ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
#undef ATOMIC_OPS_INTEGER
#undef ATOMIC_OPS
#undef InterlockedCompareExchange32
#undef InterlockedExchange32
#undef InterlockedExchangeAdd32
#undef InterlockedAnd32
#undef InterlockedOr32
#undef InterlockedXor32
#undef InterlockedExchangeAdd16
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd8
#else
......@@ -333,16 +315,6 @@ inline Object* ToObject(Isolate* isolate, uint32_t t) {
return *isolate->factory()->NewNumber(t);
}
// Performs a typed, seq-cst compare-exchange on element |index| of |buffer|
// and boxes the previously stored element value as a JS number.
template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                 Handle<Object> oldobj, Handle<Object> newobj) {
  // Convert the (already ToInteger-converted) JS values to the element type.
  T oldval = FromObject<T>(oldobj);
  T newval = FromObject<T>(newobj);
  T result =
      CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
  return ToObject(isolate, result);
}
template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
......@@ -395,50 +367,6 @@ inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
V(Uint32, uint32, UINT32, uint32_t, 4) \
V(Int32, int32, INT32, int32_t, 4)
// ES #sec-atomics.compareexchange
// Atomics.compareExchange( typedArray, index, expectedValue, replacementValue )
BUILTIN(AtomicsCompareExchange) {
  HandleScope scope(isolate);
  // Fetch the raw arguments; absent arguments arrive as undefined.
  Handle<Object> array = args.atOrUndefined(isolate, 1);
  Handle<Object> index = args.atOrUndefined(isolate, 2);
  Handle<Object> expected_value = args.atOrUndefined(isolate, 3);
  Handle<Object> replacement_value = args.atOrUndefined(isolate, 4);

  // Throws unless |array| is a shared integer typed array.
  Handle<JSTypedArray> sta;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));

  // Validate and convert the index; on failure an exception has been thrown.
  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
  if (maybe_index.IsNothing()) return isolate->heap()->exception();
  size_t i = maybe_index.FromJust();

  // ToInteger may run arbitrary JS (valueOf/toString) and can throw.
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, expected_value, Object::ToInteger(isolate, expected_value));

  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, replacement_value,
      Object::ToInteger(isolate, replacement_value));

  // Base address of the element storage inside the shared backing store.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  // Dispatch on the element type to a correctly-typed compare-exchange.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size)             \
  case kExternal##Type##Array:                                          \
    return DoCompareExchange<ctype>(isolate, source, i, expected_value, \
                                    replacement_value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  // The element type was validated above, so this is unreachable.
  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
// ES #sec-atomics.add
// Atomics.add( typedArray, index, value )
BUILTIN(AtomicsAdd) {
......
......@@ -740,7 +740,7 @@ class Isolate;
TFJ(AtomicsLoad, 2) \
TFJ(AtomicsStore, 3) \
TFJ(AtomicsExchange, 3) \
CPP(AtomicsCompareExchange) \
TFJ(AtomicsCompareExchange, 4) \
CPP(AtomicsAdd) \
CPP(AtomicsSub) \
CPP(AtomicsAnd) \
......
......@@ -435,6 +435,23 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ dmb(ISH); \
} while (0)
// Emits a seq-cst compare-exchange retry loop:
//   dmb; retry: addr = base + index; old = ldrex [addr];
//   if (old != expected) goto exit; status = strex [addr] <- new_value;
//   if (status != 0) goto retry; exit: dmb
// TempRegister(0) holds the computed address and is then reused as the
// strex status register (the address is recomputed on each retry).
// TempRegister(1) holds the zero/sign-extended expected value.
// NOTE(review): the ARM ARM requires the strex status register (Rd) to
// differ from the address register (Rn); reusing TempRegister(0) for both
// looks UNPREDICTABLE — confirm against the assembler's constraints.
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr)     \
  do {                                                                        \
    Label compareExchange;                                                    \
    Label exit;                                                               \
    __ dmb(ISH);                                                              \
    __ bind(&compareExchange);                                                \
    __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
    __ load_instr(i.OutputRegister(0), i.TempRegister(0));                    \
    __ teq(i.TempRegister(1), Operand(i.OutputRegister(0)));                  \
    __ b(ne, &exit);                                                          \
    __ store_instr(i.TempRegister(0), i.InputRegister(3), i.TempRegister(0)); \
    __ teq(i.TempRegister(0), Operand(0));                                    \
    __ b(ne, &compareExchange);                                               \
    __ bind(&exit);                                                           \
    __ dmb(ISH);                                                              \
  } while (0)
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
......@@ -2142,6 +2159,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
case kAtomicCompareExchangeInt8:
__ uxtb(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint8:
__ uxtb(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
break;
case kAtomicCompareExchangeInt16:
__ uxth(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint16:
__ uxth(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
break;
case kAtomicCompareExchangeWord32:
__ mov(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex);
break;
}
return kSuccess;
} // NOLINT(readability/fn_size)
......
......@@ -2221,6 +2221,45 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, 1, temp);
}
// Selects the ARM instruction for an AtomicCompareExchange node. Node inputs
// are (base, index, old_value, new_value); the result is the value observed
// at the memory location before the operation.
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  // Pick the width/signedness-specific opcode from the operator's type.
  MachineType type = AtomicCompareExchangeRepresentationOf(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }
  AddressingMode addressing_mode = kMode_Offset_RR;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  // Unique registers: the ldrex/strex retry loop re-reads the inputs after
  // the temps are written, so no input may alias a temp or the output.
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  // Two temps: address/store-status scratch, and the extended expected value
  // (see ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER).
  InstructionOperand temp[2];
  temp[0] = g.TempRegister();
  temp[1] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 1, outputs, input_count, inputs, 2, temp);
}
#define SIMD_TYPE_LIST(V) \
V(Float32x4) \
V(Int32x4) \
......
......@@ -101,7 +101,9 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
Register TempRegister32() { return ToRegister(instr_->TempAt(0)).W(); }
  // Returns the 32-bit (W) view of the instruction's temp register at
  // |index|; generalizes the previous zero-argument overload.
  Register TempRegister32(size_t index) {
    return ToRegister(instr_->TempAt(index)).W();
  }
Operand InputOperand2_32(size_t index) {
switch (AddressingModeField::decode(instr_->opcode())) {
......@@ -537,9 +539,24 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ bind(&exchange); \
__ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
__ store_instr(i.TempRegister32(), i.InputRegister32(2), \
__ store_instr(i.TempRegister32(0), i.InputRegister32(2), \
i.TempRegister(0)); \
__ cbnz(i.TempRegister32(), &exchange); \
__ cbnz(i.TempRegister32(0), &exchange); \
} while (0)
// Emits a compare-exchange retry loop; the ldaxr/stlxr pairs used by the
// callers provide acquire/release ordering, so no explicit dmb is emitted:
//   retry: addr = base + index; old = load [addr];
//   if (old != expected) goto exit; status = store [addr] <- new_value;
//   if (status != 0) goto retry; exit:
// TempRegister(0) holds the address; TempRegister32(0) — the W view of the
// same register — receives the store-exclusive status. TempRegister32(1)
// holds the extended expected value.
// NOTE(review): on ARMv8 the store-exclusive status register must differ
// from the transfer and address registers; using the address register's W
// view as the status register looks CONSTRAINED UNPREDICTABLE — confirm.
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr) \
  do {                                                                    \
    Label compareExchange;                                                \
    Label exit;                                                           \
    __ bind(&compareExchange);                                            \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));    \
    __ load_instr(i.OutputRegister32(), i.TempRegister(0));               \
    __ cmp(i.TempRegister32(1), i.OutputRegister32());                    \
    __ B(ne, &exit);                                                      \
    __ store_instr(i.TempRegister32(0), i.InputRegister32(3),             \
                   i.TempRegister(0));                                    \
    __ cbnz(i.TempRegister32(0), &compareExchange);                       \
    __ bind(&exit);                                                       \
  } while (0)
#define ASSEMBLE_IEEE754_BINOP(name) \
......@@ -1663,6 +1680,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr);
break;
case kAtomicCompareExchangeInt8:
__ Uxtb(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint8:
__ Uxtb(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb);
break;
case kAtomicCompareExchangeInt16:
__ Uxth(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint16:
__ Uxth(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh);
break;
case kAtomicCompareExchangeWord32:
__ mov(i.TempRegister(1), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr);
break;
}
return kSuccess;
} // NOLINT(readability/fn_size)
......
......@@ -2739,6 +2739,45 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, 1, temp);
}
// Instruction selection for AtomicCompareExchange on ARM64. The code
// generator expands this to a load/store-exclusive retry loop, so every
// operand is kept in its own register for the whole loop and two scratch
// registers are reserved for it.
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  Arm64OperandGenerator g(this);

  // Map the operator's machine type onto a width/sign-specific opcode.
  const MachineType type = AtomicCompareExchangeRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }

  // Node inputs, in order: base, index, expected value, replacement value.
  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 g.UseUniqueRegister(node->InputAt(2)),
                                 g.UseUniqueRegister(node->InputAt(3))};
  InstructionOperand outputs[] = {g.UseUniqueRegister(node)};
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  Emit(opcode | AddressingModeField::encode(kMode_MRR), 1, outputs,
       arraysize(inputs), inputs, arraysize(temps), temps);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -477,6 +477,13 @@ Node* CodeAssembler::AtomicExchange(MachineType type, Node* base, Node* offset,
return raw_assembler()->AtomicExchange(type, base, offset, value);
}
// Emits an atomic compare-exchange on the raw memory location |base| +
// |offset|: |new_value| is stored iff the current value equals |old_value|.
// Returns a node for the value observed before the operation.
Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base,
                                           Node* offset, Node* old_value,
                                           Node* new_value) {
  return raw_assembler()->AtomicCompareExchange(type, base, offset, old_value,
                                                new_value);
}
Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
DCHECK(Heap::RootCanBeWrittenAfterInitialization(root_index));
Node* roots_array_start =
......
......@@ -278,6 +278,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Exchange value at raw memory location
Node* AtomicExchange(MachineType type, Node* base, Node* offset, Node* value);
// Compare and Exchange value at raw memory location
Node* AtomicCompareExchange(MachineType type, Node* base, Node* offset,
Node* old_value, Node* new_value);
// Store a value to the root array.
Node* StoreRoot(Heap::RootListIndex root_index, Node* value);
......
......@@ -1987,6 +1987,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
case kAtomicCompareExchangeInt8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_b(eax, eax);
break;
}
case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_b(eax, eax);
break;
}
case kAtomicCompareExchangeInt16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_w(eax, eax);
break;
}
case kAtomicCompareExchangeUint16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_w(eax, eax);
break;
}
case kAtomicCompareExchangeWord32: {
__ lock();
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
}
case kAtomicLoadInt8:
case kAtomicLoadUint8:
case kAtomicLoadInt16:
......
......@@ -1777,6 +1777,48 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
// Selects the IA32 instruction for an AtomicCompareExchange node. cmpxchg
// implicitly compares against — and leaves its result in — eax, so the
// expected value is fixed to eax and the node's output is defined as eax.
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  IA32OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);
  // Pick the width/signedness-specific opcode from the operator's type.
  MachineType type = AtomicCompareExchangeRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }
  InstructionOperand outputs[1];
  AddressingMode addressing_mode;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  // Input layout expected by the code generator: expected value in eax,
  // then the new value, then the memory operand (base [+ index]) at slot 2.
  inputs[input_count++] = g.UseFixed(old_value, eax);
  inputs[input_count++] = g.UseUniqueRegister(new_value);
  inputs[input_count++] = g.UseUniqueRegister(base);
  // Fold a constant index into the addressing mode when possible.
  if (g.CanBeImmediate(index)) {
    inputs[input_count++] = g.UseImmediate(index);
    addressing_mode = kMode_MRI;
  } else {
    inputs[input_count++] = g.UseUniqueRegister(index);
    addressing_mode = kMode_MR1;
  }
  outputs[0] = g.DefineAsFixed(node, eax);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitInt32x4Splat(Node* node) {
VisitRO(this, node, kIA32Int32x4Splat);
}
......
......@@ -94,6 +94,11 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(AtomicExchangeInt16) \
V(AtomicExchangeUint16) \
V(AtomicExchangeWord32) \
V(AtomicCompareExchangeInt8) \
V(AtomicCompareExchangeUint8) \
V(AtomicCompareExchangeInt16) \
V(AtomicCompareExchangeUint16) \
V(AtomicCompareExchangeWord32) \
V(Ieee754Float64Acos) \
V(Ieee754Float64Acosh) \
V(Ieee754Float64Asin) \
......
......@@ -331,6 +331,11 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kAtomicExchangeInt16:
case kAtomicExchangeUint16:
case kAtomicExchangeWord32:
case kAtomicCompareExchangeInt8:
case kAtomicCompareExchangeUint8:
case kAtomicCompareExchangeInt16:
case kAtomicCompareExchangeUint16:
case kAtomicCompareExchangeWord32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
......
......@@ -1463,6 +1463,11 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(type.representation(), node);
return VisitAtomicExchange(node);
}
case IrOpcode::kAtomicCompareExchange: {
MachineType type = AtomicCompareExchangeRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitAtomicCompareExchange(node);
}
case IrOpcode::kProtectedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
......
......@@ -151,6 +151,11 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] = PromoteRepresentation(
AtomicExchangeRepresentationOf(node->op()).representation());
break;
case IrOpcode::kAtomicCompareExchange:
representation_vector_[node->id()] = PromoteRepresentation(
AtomicCompareExchangeRepresentationOf(node->op())
.representation());
break;
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
representation_vector_[node->id()] = PromoteRepresentation(
......@@ -460,6 +465,24 @@ class MachineRepresentationChecker {
node, 2, inferrer_->GetRepresentation(node));
}
break;
case IrOpcode::kAtomicCompareExchange:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
switch (inferrer_->GetRepresentation(node)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
CheckValueInputIsTagged(node, 2);
CheckValueInputIsTagged(node, 3);
break;
default:
CheckValueInputRepresentationIs(
node, 2, inferrer_->GetRepresentation(node));
CheckValueInputRepresentationIs(
node, 3, inferrer_->GetRepresentation(node));
}
break;
case IrOpcode::kPhi:
switch (inferrer_->GetRepresentation(node)) {
case MachineRepresentation::kTagged:
......
......@@ -85,6 +85,11 @@ MachineType AtomicExchangeRepresentationOf(Operator const* op) {
return OpParameter<MachineType>(op);
}
// Extracts the MachineType parameter attached to an AtomicCompareExchange
// operator (the element type the compare-exchange operates on).
MachineType AtomicCompareExchangeRepresentationOf(Operator const* op) {
  DCHECK_EQ(IrOpcode::kAtomicCompareExchange, op->opcode());
  return OpParameter<MachineType>(op);
}
#define PURE_BINARY_OP_LIST_32(V) \
V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
......@@ -607,6 +612,19 @@ struct MachineOperatorGlobalCache {
ATOMIC_TYPE_LIST(ATOMIC_EXCHANGE)
#undef ATOMIC_EXCHANGE
// Defines one cached AtomicCompareExchange operator per atomic type. The
// operator takes four value inputs — base, index, old_value, new_value (see
// RawMachineAssembler::AtomicCompareExchange) — plus one effect and one
// control input, and produces one value output and one effect output.
#define ATOMIC_COMPARE_EXCHANGE(Type)                                     \
  struct AtomicCompareExchange##Type##Operator                            \
      : public Operator1<MachineType> {                                   \
    AtomicCompareExchange##Type##Operator()                               \
        : Operator1<MachineType>(IrOpcode::kAtomicCompareExchange,        \
                                 Operator::kNoDeopt | Operator::kNoThrow, \
                                 "AtomicCompareExchange", 4, 1, 1, 1, 1,  \
                                 0, MachineType::Type()) {}               \
  };                                                                      \
  AtomicCompareExchange##Type##Operator kAtomicCompareExchange##Type;
  ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
// might decide to split these operators, thus potentially creating live
......@@ -874,6 +892,17 @@ const Operator* MachineOperatorBuilder::AtomicExchange(MachineType rep) {
return nullptr;
}
// Returns the cached AtomicCompareExchange operator for |rep|. |rep| must be
// one of the machine types in ATOMIC_TYPE_LIST; anything else is a bug.
const Operator* MachineOperatorBuilder::AtomicCompareExchange(MachineType rep) {
#define COMPARE_EXCHANGE(kRep)                   \
  if (rep == MachineType::kRep()) {              \
    return &cache_.kAtomicCompareExchange##kRep; \
  }
  ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
  UNREACHABLE();
  return nullptr;
}
#define SIMD_LANE_OPS(Type, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane( \
int32_t lane_index) { \
......
......@@ -99,6 +99,8 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
MachineType AtomicExchangeRepresentationOf(Operator const* op);
MachineType AtomicCompareExchangeRepresentationOf(Operator const* op);
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
......@@ -611,6 +613,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* AtomicStore(MachineRepresentation rep);
// atomic-exchange [base + index], value
const Operator* AtomicExchange(MachineType rep);
// atomic-compare-exchange [base + index], old_value, new_value
const Operator* AtomicCompareExchange(MachineType rep);
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == MachineRepresentation::kWord32; }
......
......@@ -1595,6 +1595,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicExchangeInt16:
case kAtomicExchangeUint16:
case kAtomicExchangeWord32:
case kAtomicCompareExchangeInt8:
case kAtomicCompareExchangeUint8:
case kAtomicCompareExchangeInt16:
case kAtomicCompareExchangeUint16:
case kAtomicCompareExchangeWord32:
UNREACHABLE();
break;
}
......
......@@ -1887,6 +1887,10 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
void InstructionSelector::VisitAtomicExchange(Node* node) { UNIMPLEMENTED(); }
// Not implemented for this architecture; Atomics.compareExchange reaches the
// C++ runtime via the TF builtin's per-architecture fallback instead, so this
// machine operator is never selected here.
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  UNIMPLEMENTED();
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -1921,6 +1921,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicExchangeInt16:
case kAtomicExchangeUint16:
case kAtomicExchangeWord32:
case kAtomicCompareExchangeInt8:
case kAtomicCompareExchangeUint8:
case kAtomicCompareExchangeInt16:
case kAtomicCompareExchangeUint16:
case kAtomicCompareExchangeWord32:
UNREACHABLE();
break;
case kMips64AssertEqual:
......
......@@ -2638,6 +2638,10 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
void InstructionSelector::VisitAtomicExchange(Node* node) { UNIMPLEMENTED(); }
// Not implemented for this architecture; Atomics.compareExchange reaches the
// C++ runtime via the TF builtin's per-architecture fallback instead, so this
// machine operator is never selected here.
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  UNIMPLEMENTED();
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -554,6 +554,7 @@
V(AtomicLoad) \
V(AtomicStore) \
V(AtomicExchange) \
V(AtomicCompareExchange) \
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
......
......@@ -1983,6 +1983,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicExchangeInt16:
case kAtomicExchangeUint16:
case kAtomicExchangeWord32:
case kAtomicCompareExchangeInt8:
case kAtomicCompareExchangeUint8:
case kAtomicCompareExchangeInt16:
case kAtomicCompareExchangeUint16:
case kAtomicCompareExchangeWord32:
UNREACHABLE();
break;
default:
......
......@@ -2119,6 +2119,10 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
void InstructionSelector::VisitAtomicExchange(Node* node) { UNIMPLEMENTED(); }
// Not implemented for this architecture; Atomics.compareExchange reaches the
// C++ runtime via the TF builtin's per-architecture fallback instead, so this
// machine operator is never selected here.
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  UNIMPLEMENTED();
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -179,6 +179,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->AtomicExchange(rep), base, index, value);
}
  // Atomically compare-exchanges the value at [base + index]: |new_value| is
  // stored iff the current value equals |old_value|. Returns a node for the
  // previously stored value.
  Node* AtomicCompareExchange(MachineType rep, Node* base, Node* index,
                              Node* old_value, Node* new_value) {
    return AddNode(machine()->AtomicCompareExchange(rep), base, index,
                   old_value, new_value);
  }
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
return AddNode(machine()->WordAnd(), a, b);
......
......@@ -2414,6 +2414,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bne(&do_cs, Label::kNear);
break;
}
case kAtomicCompareExchangeInt8:
case kAtomicCompareExchangeUint8:
case kAtomicCompareExchangeInt16:
case kAtomicCompareExchangeUint16:
case kAtomicCompareExchangeWord32: {
UNREACHABLE();
break;
}
default:
UNREACHABLE();
break;
......
......@@ -2431,6 +2431,10 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
// Not implemented for this architecture; Atomics.compareExchange reaches the
// C++ runtime via the TF builtin's per-architecture fallback instead, so this
// machine operator is never selected here.
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  UNIMPLEMENTED();
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -1418,6 +1418,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kAtomicLoad:
case IrOpcode::kAtomicStore:
case IrOpcode::kAtomicExchange:
case IrOpcode::kAtomicCompareExchange:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
......
......@@ -2301,6 +2301,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xchgl(i.InputRegister(0), i.MemoryOperand(1));
break;
}
case kAtomicCompareExchangeInt8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movsxbl(rax, rax);
break;
}
case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movzxbl(rax, rax);
break;
}
case kAtomicCompareExchangeInt16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movsxwl(rax, rax);
break;
}
case kAtomicCompareExchangeUint16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movzxwl(rax, rax);
break;
}
case kAtomicCompareExchangeWord32: {
__ lock();
__ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
break;
}
case kAtomicLoadInt8:
case kAtomicLoadUint8:
case kAtomicLoadInt16:
......
......@@ -2339,6 +2339,48 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
// Instruction selection for AtomicCompareExchange on x64. cmpxchg keeps the
// expected value and the result in rax, so the expected-value input is fixed
// to rax and the node's output is defined as rax. The memory operand (base
// [+ index]) starts at input slot 2, matching the code generator.
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
  X64OperandGenerator g(this);
  Node* const base = node->InputAt(0);
  Node* const index = node->InputAt(1);
  Node* const old_value = node->InputAt(2);
  Node* const new_value = node->InputAt(3);

  // Map the operator's machine type onto a width/sign-specific opcode.
  const MachineType type = AtomicCompareExchangeRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }

  // A constant index can be folded into the addressing mode.
  const bool index_is_immediate = g.CanBeImmediate(index);
  const AddressingMode addressing_mode =
      index_is_immediate ? kMode_MRI : kMode_MR1;
  InstructionOperand inputs[] = {g.UseFixed(old_value, rax),
                                 g.UseUniqueRegister(new_value),
                                 g.UseUniqueRegister(base),
                                 index_is_immediate
                                     ? g.UseImmediate(index)
                                     : g.UseUniqueRegister(index)};
  InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
  Emit(opcode | AddressingModeField::encode(addressing_mode), 1, outputs,
       arraysize(inputs), inputs);
}
#define SIMD_TYPES(V) V(Int32x4)
#define SIMD_ZERO_OP_LIST(V) \
......
......@@ -6897,7 +6897,8 @@ class Script: public Struct {
// Atomics functions that carry a BuiltinFunctionId (see the
// BuiltinFunctionId enum below); compareExchange is now included.
#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
  V(Atomics, load, AtomicsLoad)          \
  V(Atomics, store, AtomicsStore)        \
  V(Atomics, exchange, AtomicsExchange)  \
  V(Atomics, compareExchange, AtomicsCompareExchange)
enum BuiltinFunctionId {
kArrayCode,
......
......@@ -26,15 +26,29 @@ inline T ExchangeSeqCst(T* p, T value) {
return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}
// Strong sequentially consistent compare-exchange: writes |newval| into *p
// iff *p currently equals |oldval|. Returns the value *p held before the
// call (equal to |oldval| exactly when the exchange succeeded).
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  // On failure, __atomic_compare_exchange_n stores the observed value back
  // into |oldval|; the success flag itself is not needed by callers.
  bool exchanged = __atomic_compare_exchange_n(
      p, &oldval, newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  (void)exchanged;
  return oldval;
}
#elif V8_CC_MSVC
#define InterlockedExchange32 _InterlockedExchange
#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
// For each (C type, Interlocked suffix, VC intrinsic type) triple, defines
// ExchangeSeqCst and CompareExchangeSeqCst wrappers over the MSVC
// Interlocked* intrinsics. Note the argument order: the intrinsic takes
// (destination, exchange, comparand), so |newval| precedes |oldval|.
#define ATOMIC_OPS(type, suffix, vctype)                                    \
  inline type ExchangeSeqCst(type* p, type value) {                         \
    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
                                       bit_cast<vctype>(value));            \
  }                                                                         \
  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                              bit_cast<vctype>(newval),     \
                                              bit_cast<vctype>(oldval));    \
  }
ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
......@@ -47,6 +61,8 @@ ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
#undef ATOMIC_OPS
#undef InterlockedExchange32
#undef InterlockedCompareExchange32
#undef InterlockedCompareExchange8
#else
......@@ -114,6 +130,16 @@ inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
return ToObject(isolate, result);
}
// Performs a typed, seq-cst compare-exchange on element |index| of |buffer|
// and boxes the previously stored element value as a JS number.
template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                 Handle<Object> oldobj, Handle<Object> newobj) {
  // Convert the (already number-converted) JS values to the element type.
  T oldval = FromObject<T>(oldobj);
  T newval = FromObject<T>(newobj);
  T result =
      CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
  return ToObject(isolate, result);
}
} // anonymous namespace
// Duplicated from objects.h
......@@ -178,6 +204,34 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
return isolate->heap()->undefined_value();
}
// Runtime fallback for Atomics.compareExchange, called by the TF builtin on
// architectures without inline codegen. Arguments arrive already validated
// and number-converted: (shared typed array, index, expected, replacement).
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK_EQ(4, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
  // The builtin only calls this for shared, in-bounds accesses.
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  // Base address of the element storage inside the shared backing store.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  // Dispatch on the element type to a correctly-typed compare-exchange.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoCompareExchange<ctype>(isolate, source, index, oldobj, newobj);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  // Only integer typed arrays reach this runtime function.
  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
} // namespace internal
} // namespace v8
......@@ -64,6 +64,7 @@ namespace internal {
F(ThrowNotInt32SharedTypedArrayError, 1, 1) \
F(ThrowInvalidAtomicAccessIndexError, 0, 1) \
F(AtomicsExchange, 3, 1) \
F(AtomicsCompareExchange, 4, 1) \
F(AtomicsNumWaitersForTesting, 2, 1) \
F(SetAllowAtomicsWait, 1, 1)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment