Commit ce3f9afa authored by zhengxing.li, committed by Commit bot

X87: [Atomics] Remove Atomics code stubs; use TF ops.

  port d412cfa2 (r35596)

  original commit message:
  Reland of (https://codereview.chromium.org/1891033002)

  This is a much cleaner solution, which won't require nearly as much
  architecture-specific code. Thanks bmeurer@!

BUG=

Review URL: https://codereview.chromium.org/1897143003

Cr-Commit-Position: refs/heads/master@{#35603}
parent d878eb7e
@@ -1775,6 +1775,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
    case kCheckedStoreWord64:
      UNREACHABLE();  // currently unsupported checked int64 load/store.
      break;
    case kAtomicLoadInt8:
    case kAtomicLoadUint8:
    case kAtomicLoadInt16:
    case kAtomicLoadUint16:
    case kAtomicLoadWord32:
      UNREACHABLE();  // Won't be generated by instruction selector.
      break;
  }
}  // NOLINT(readability/fn_size)
@@ -1590,6 +1590,14 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
         load_rep.representation() == MachineRepresentation::kWord16 ||
         load_rep.representation() == MachineRepresentation::kWord32);
  USE(load_rep);
  VisitLoad(node);
}

// static
MachineOperatorBuilder::Flags
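The new VisitAtomicLoad simply delegates to VisitLoad: on IA-32, which the x87 port targets, an aligned load of 8, 16, or 32 bits is already atomic at the hardware level, so no LOCK prefix or fence is needed on the load side. As a standalone illustration (this snippet is mine, not part of the CL), the same guarantee is what lets a C++ compiler turn an atomic 32-bit load into a plain MOV on this architecture:

#include <atomic>
#include <cstdint>

// On ia32 both loads typically assemble to a single "mov eax, [p]": aligned
// 32-bit loads are atomic, and the fencing required for seq_cst ordering is
// paid on the store side instead.
int32_t RelaxedLoad(const std::atomic<int32_t>* p) {
  return p->load(std::memory_order_relaxed);
}

int32_t SeqCstLoad(const std::atomic<int32_t>* p) {
  return p->load();  // memory_order_seq_cst by default
}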
@@ -5507,197 +5507,6 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
                           return_value_operand, NULL);
}

namespace {

void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store,
                               Register object, Register scratch) {
  Label offset_is_not_smi, done;
  __ mov(scratch, FieldOperand(object, JSTypedArray::kBufferOffset));
  __ mov(backing_store,
         FieldOperand(scratch, JSArrayBuffer::kBackingStoreOffset));

  __ mov(scratch, FieldOperand(object, JSArrayBufferView::kByteOffsetOffset));
  __ JumpIfNotSmi(scratch, &offset_is_not_smi, Label::kNear);
  // Offset is smi.
  __ SmiUntag(scratch);
  __ add(backing_store, scratch);
  __ jmp(&done, Label::kNear);

  // Offset is a heap number.
  __ bind(&offset_is_not_smi);
  // __ movsd(xmm0, FieldOperand(scratch, HeapNumber::kValueOffset));
  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
  // __ cvttsd2si(scratch, xmm0);
  __ TruncateX87TOSToI(scratch);
  __ add(backing_store, scratch);
  __ bind(&done);
}
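// --- Editor's note: illustrative sketch, not part of this CL or of V8. ---
// The helper above computes backing_store = buffer->backing_store + byte
// offset, where the view's byte offset is stored either as a Smi or, for
// large offsets, as a HeapNumber that must be truncated to an integer.
// Roughly, in portable C++ (parameter names are illustrative):
#include <cstdint>

uint8_t* TypedArrayElementBase(uint8_t* buffer_backing_store,
                               bool offset_is_smi, int32_t smi_offset,
                               double heap_number_offset) {
  // The heap-number branch mirrors the fld_d + TruncateX87TOSToI pair above.
  int32_t byte_offset = offset_is_smi
                            ? smi_offset
                            : static_cast<int32_t>(heap_number_offset);
  return buffer_backing_store + byte_offset;
}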
void TypedArrayJumpTablePrologue(MacroAssembler* masm, Register object,
                                 Register scratch, Register scratch2,
                                 Label* table) {
  __ mov(scratch, FieldOperand(object, JSObject::kElementsOffset));
  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
  __ sub(scratch, Immediate(static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE)));
  __ Assert(above_equal, kOffsetOutOfRange);
  __ jmp(Operand::JumpTable(scratch, times_4, table));
}

void TypedArrayJumpTableEpilogue(MacroAssembler* masm, Label* table, Label* i8,
                                 Label* u8, Label* i16, Label* u16, Label* i32,
                                 Label* u32, Label* u8c) {
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5);
  STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6);
  STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7);
  STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8);

  Label abort;
  __ bind(table);
  __ dd(i8);      // Int8Array
  __ dd(u8);      // Uint8Array
  __ dd(i16);     // Int16Array
  __ dd(u16);     // Uint16Array
  __ dd(i32);     // Int32Array
  __ dd(u32);     // Uint32Array
  __ dd(&abort);  // Float32Array
  __ dd(&abort);  // Float64Array
  __ dd(u8c);     // Uint8ClampedArray

  __ bind(&abort);
  __ Abort(kNoReason);
}
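// --- Editor's note: illustrative sketch, not part of this CL or of V8. ---
// The prologue/epilogue pair implements a jump table keyed by elements kind:
// instance_type - FIXED_INT8_ARRAY_TYPE indexes a table of labels laid out in
// exactly the order the STATIC_ASSERTs above pin down. The same dispatch in
// plain C++ (a handler array stands in for the bound labels):
#include <cstdlib>

// handlers[0..8]: Int8, Uint8, Int16, Uint16, Int32, Uint32, Float32,
// Float64, Uint8Clamped. The float entries point at an abort handler, just
// as the table above routes Float32Array/Float64Array to the abort label.
void DispatchOnElementsKind(int instance_type, int fixed_int8_array_type,
                            void (*const handlers[9])()) {
  int index = instance_type - fixed_int8_array_type;
  if (index < 0 || index > 8) std::abort();  // mirrors the Assert/Abort paths
  handlers[index]();
}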
void ReturnInteger32(MacroAssembler* masm, X87Register dst, Register value,
                     Register scratch, Label* use_heap_number) {
  Label not_smi;
  if (!value.is(eax)) {
    __ mov(eax, value);
  }
  __ JumpIfNotValidSmiValue(eax, scratch, &not_smi, Label::kNear);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&not_smi);
  // __ Cvtsi2sd(dst, eax);
  __ push(eax);
  __ fld_s(MemOperand(esp, 0));
  __ pop(eax);
  __ jmp(use_heap_number);
}

void ReturnUnsignedInteger32(MacroAssembler* masm, X87Register dst,
                             Register value, X87Register scratch,
                             Label* use_heap_number) {
  Label not_smi;
  if (!value.is(eax)) {
    __ mov(eax, value);
  }
  __ JumpIfUIntNotValidSmiValue(eax, &not_smi, Label::kNear);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&not_smi);
  // Convert [0, 2**32-1] -> [-2**31, 2**31-1].
  __ add(eax, Immediate(-0x7fffffff - 1));  // -0x80000000 parses incorrectly.
  // __ Cvtsi2sd(dst, eax);
  __ push(eax);
  __ fld_s(MemOperand(esp, 0));
  __ pop(eax);
  __ mov(eax, Immediate(0x4f000000));  // 2**31 as IEEE float
  // __ movd(scratch, eax);
  __ push(eax);
  __ fld_s(MemOperand(esp, 0));
  __ pop(eax);
  // __ cvtss2sd(scratch, scratch);
  __ sub(esp, Immediate(kDoubleSize));
  __ fstp_s(MemOperand(esp, 0));
  __ fld_s(MemOperand(esp, 0));
  __ add(esp, Immediate(kDoubleSize));
  // __ addsd(dst, scratch);
  __ faddp();
  __ jmp(use_heap_number);
}
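// --- Editor's note: illustrative sketch, not part of this CL or of V8. ---
// ReturnUnsignedInteger32 uses the classic trick for converting an unsigned
// 32-bit value when only a signed int->double conversion is available: bias
// the value into signed range, convert, then add 2^31 back as a double. The
// same arithmetic in portable C++ (where a plain cast would of course do):
#include <cstdint>

double Uint32ToDouble(uint32_t value) {
  // Bias [0, 2^32) down into [-2^31, 2^31) so it fits a signed 32-bit int.
  int64_t shifted = static_cast<int64_t>(value) - 0x80000000LL;
  // Signed conversion, then add 2^31 back in double precision.
  return static_cast<double>(static_cast<int32_t>(shifted)) + 2147483648.0;
}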
void ReturnAllocatedHeapNumber(MacroAssembler* masm, X87Register value,
                               Register scratch, Register scratch2) {
  Label call_runtime;
  __ AllocateHeapNumber(eax, scratch, scratch2, &call_runtime);
  // __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), value);
  __ fst_d(FieldOperand(eax, HeapNumber::kValueOffset));
  __ Ret();

  __ bind(&call_runtime);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    // __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), value);
    __ fst_d(FieldOperand(eax, HeapNumber::kValueOffset));
  }
  __ Ret();
}

}  // anonymous namespace
void AtomicsLoadStub::Generate(MacroAssembler* masm) {
  Register object = edx;
  Register index = eax;  // Index is an untagged word32.
  Register backing_store = ebx;
  Label table;

  GetTypedArrayBackingStore(masm, backing_store, object, ecx);
  TypedArrayJumpTablePrologue(masm, object, ecx, esi, &table);

  Label i8, u8, i16, u16, i32, u32;
  __ bind(&i8);
  __ mov_b(eax, Operand(backing_store, index, times_1, 0));
  __ movsx_b(eax, eax);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&u8);
  __ mov_b(eax, Operand(backing_store, index, times_1, 0));
  __ movzx_b(eax, eax);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&i16);
  __ mov_w(eax, Operand(backing_store, index, times_2, 0));
  __ movsx_w(eax, eax);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&u16);
  __ mov_w(eax, Operand(backing_store, index, times_2, 0));
  __ movzx_w(eax, eax);
  __ SmiTag(eax);
  __ Ret();

  Label use_heap_number;

  __ bind(&i32);
  __ mov(eax, Operand(backing_store, index, times_4, 0));
  ReturnInteger32(masm, stX_0, eax, ecx, &use_heap_number);

  __ bind(&u32);
  __ mov(eax, Operand(backing_store, index, times_4, 0));
  ReturnUnsignedInteger32(masm, stX_0, eax, stX_1, &use_heap_number);

  __ bind(&use_heap_number);
  ReturnAllocatedHeapNumber(masm, stX_0, ecx, edx);

  TypedArrayJumpTableEpilogue(masm, &table, &i8, &u8, &i16, &u16, &i32, &u32,
                              &u8);
}
#undef __
} // namespace internal
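Taken together, the removed stub loaded one element from backing_store + index * element_size, sign- or zero-extended it, and returned it as a Smi when possible, falling back to a freshly allocated HeapNumber for 32-bit values outside Smi range; Float32Array and Float64Array were rejected via the jump table's abort entries, and Uint8ClampedArray reused the Uint8 path. A compact C++ sketch of just the load-and-extend step (mine, not code from the CL; the kind numbering follows the jump-table order above):

#include <cstdint>
#include <cstdlib>
#include <cstring>

// kind: 0=i8, 1=u8, 2=i16, 3=u16, 4=i32, 5=u32, 8=u8clamped (6/7 = floats).
int64_t LoadTypedArrayElement(const uint8_t* backing_store, uint32_t index,
                              int kind) {
  switch (kind) {
    case 0: { int8_t v;   std::memcpy(&v, backing_store + index, 1);     return v; }
    case 1:  // Uint8
    case 8: { uint8_t v;  std::memcpy(&v, backing_store + index, 1);     return v; }
    case 2: { int16_t v;  std::memcpy(&v, backing_store + index * 2, 2); return v; }
    case 3: { uint16_t v; std::memcpy(&v, backing_store + index * 2, 2); return v; }
    case 4: { int32_t v;  std::memcpy(&v, backing_store + index * 4, 4); return v; }
    case 5: { uint32_t v; std::memcpy(&v, backing_store + index * 4, 4); return v; }
    default: std::abort();  // float kinds hit the Abort entry in the table
  }
}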
@@ -427,15 +427,6 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void AtomicsLoadDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      edx,  // the typedarray object
      eax   // the index to load (untagged)
  };
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

}  // namespace internal
}  // namespace v8