Commit 1ff65e99 authored by zhengxing.li's avatar zhengxing.li Committed by Commit bot

X87: [Atomics] code stubs for atomic operations.

  port 5e9ddf6c (r35453)

  original commit message:
  Reland of (https://codereview.chromium.org/1617503003)

  * New atomic code stubs for x64, ia32, arm, arm64
  * Add convenience functions JumpIfNotValidSmiValue, JumpIfUIntNotValidSmiValue
    to macro-assembler-ia32 (API based on x64 macro assembler)
  * Remove runtime implementation of Atomics.load, the code stub should always be
    called instead
  * Add new test to mjsunit atomics test; check that Smi values of different
    sizes are supported when possible, else fall back to HeapNumbers

  These changes were needed to add another codestub:
  * Bump kStubMajorKeyBits from 7 to 8
  * Reduce ScriptContextFieldStub::kSlotIndexBits from 13 to 12

BUG=

Review URL: https://codereview.chromium.org/1894923002

Cr-Commit-Position: refs/heads/master@{#35560}
parent 5ec339c4
......@@ -5507,6 +5507,196 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
return_value_operand, NULL);
}
namespace {
// Computes the address of the first element of a JSTypedArray into
// |backing_store|: buffer->backing_store() + byte_offset. The byte offset
// may be stored as either a Smi or a HeapNumber; both cases are handled.
// Clobbers |scratch|; |object| is preserved.
void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store,
                               Register object, Register scratch) {
  Label offset_is_not_smi, done;
  // backing_store = object->buffer()->backing_store().
  __ mov(scratch, FieldOperand(object, JSTypedArray::kBufferOffset));
  __ mov(backing_store,
         FieldOperand(scratch, JSArrayBuffer::kBackingStoreOffset));
  __ mov(scratch, FieldOperand(object, JSArrayBufferView::kByteOffsetOffset));
  __ JumpIfNotSmi(scratch, &offset_is_not_smi, Label::kNear);
  // Offset is smi.
  __ SmiUntag(scratch);
  __ add(backing_store, scratch);
  __ jmp(&done, Label::kNear);
  // Offset is a heap number. The SSE sequence from the ia32 original is kept
  // commented out; on x87 the double is loaded onto the FPU stack and
  // truncated to an int32 instead.
  __ bind(&offset_is_not_smi);
  // __ movsd(xmm0, FieldOperand(scratch, HeapNumber::kValueOffset));
  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
  // __ cvttsd2si(scratch, xmm0);
  __ TruncateX87TOSToI(scratch);
  // NOTE(review): verify TruncateX87TOSToI leaves the x87 stack balanced on
  // this path (i.e. that the fld_d above is popped) — TODO confirm.
  __ add(backing_store, scratch);
  __ bind(&done);
}
// Dispatches on the typed array's element kind through an inline jump table.
// Loads the instance type of the elements fixed array, rebases it so that
// FIXED_INT8_ARRAY_TYPE becomes index 0, and jumps through |table|.
// Clobbers |scratch|; |scratch2| is not used in this x87 port.
void TypedArrayJumpTablePrologue(MacroAssembler* masm, Register object,
                                 Register scratch, Register scratch2,
                                 Label* table) {
  __ mov(scratch, FieldOperand(object, JSObject::kElementsOffset));
  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
  __ sub(scratch, Immediate(static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE)));
  // The sub above sets the flags tested here: in debug builds check the
  // rebased index is non-negative before indexing the table.
  __ Assert(above_equal, kOffsetOutOfRange);
  __ jmp(Operand::JumpTable(scratch, times_4, table));
}
// Emits the jump table bound at |table|, one 32-bit entry per typed array
// element kind in instance-type order. The Float32/Float64 entries point at
// an Abort stub — this table is only reached for integer typed arrays.
void TypedArrayJumpTableEpilogue(MacroAssembler* masm, Label* table, Label* i8,
                                 Label* u8, Label* i16, Label* u16, Label* i32,
                                 Label* u32, Label* u8c) {
  // The table layout relies on the fixed-array instance types being
  // consecutive, in exactly this order, starting at FIXED_INT8_ARRAY_TYPE.
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5);
  STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6);
  STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7);
  STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8);

  Label abort;
  __ bind(table);
  __ dd(i8);      // Int8Array
  __ dd(u8);      // Uint8Array
  __ dd(i16);     // Int16Array
  __ dd(u16);     // Uint16Array
  __ dd(i32);     // Int32Array
  __ dd(u32);     // Uint32Array
  __ dd(&abort);  // Float32Array
  __ dd(&abort);  // Float64Array
  __ dd(u8c);     // Uint8ClampedArray

  __ bind(&abort);
  __ Abort(kNoReason);
}
// Returns the signed int32 in |value| to the caller: as a Smi when it fits,
// otherwise converts it to a double on the x87 stack (|dst| is stX_0 at the
// call sites) and jumps to |use_heap_number| so the caller can box it.
// Clobbers eax and |scratch|.
void ReturnInteger32(MacroAssembler* masm, X87Register dst, Register value,
                     Register scratch, Label* use_heap_number) {
  Label not_smi;
  if (!value.is(eax)) {
    __ mov(eax, value);
  }
  __ JumpIfNotValidSmiValue(eax, scratch, &not_smi, Label::kNear);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&not_smi);
  // __ Cvtsi2sd(dst, eax);
  __ push(eax);
  // BUG FIX: use fild_s (load *integer*), not fld_s. eax holds a signed
  // int32 that must be converted to a double, matching the Cvtsi2sd intent
  // above; fld_s would reinterpret its bit pattern as an IEEE single.
  __ fild_s(MemOperand(esp, 0));
  __ pop(eax);
  __ jmp(use_heap_number);
}
// Returns the unsigned int32 in |value| to the caller: as a Smi when it is
// below 2**30, otherwise converts it to a double on the x87 stack and jumps
// to |use_heap_number|. Because the x87 has no unsigned load, the value is
// biased into signed range, converted, and the bias (2**31) added back in
// floating point. Clobbers eax.
void ReturnUnsignedInteger32(MacroAssembler* masm, X87Register dst,
                             Register value, X87Register scratch,
                             Label* use_heap_number) {
  Label not_smi;
  if (!value.is(eax)) {
    __ mov(eax, value);
  }
  __ JumpIfUIntNotValidSmiValue(eax, &not_smi, Label::kNear);
  __ SmiTag(eax);
  __ Ret();

  __ bind(&not_smi);
  // Convert [0, 2**32-1] -> [-2**31, 2**31-1].
  __ add(eax, Immediate(-0x7fffffff - 1));  // -0x80000000 parses incorrectly.
  // __ Cvtsi2sd(dst, eax);
  __ push(eax);
  // BUG FIX: fild_s, not fld_s — eax holds a (biased) signed int32 that must
  // be *converted* to a double, not reinterpreted as IEEE single bits.
  __ fild_s(MemOperand(esp, 0));
  __ pop(eax);
  __ mov(eax, Immediate(0x4f000000));  // 2**31 as IEEE float
  // __ movd(scratch, eax);
  // Here fld_s IS correct: eax deliberately holds the bit pattern of the
  // single-precision float 2**31.
  __ push(eax);
  __ fld_s(MemOperand(esp, 0));
  __ pop(eax);
  // __ cvtss2sd(scratch, scratch);
  __ sub(esp, Immediate(kDoubleSize));
  __ fstp_s(MemOperand(esp, 0));
  __ fld_s(MemOperand(esp, 0));
  __ add(esp, Immediate(kDoubleSize));
  // __ addsd(dst, scratch);
  // Undo the bias: TOS = value + 2**31; faddp pops one x87 slot, leaving the
  // final double on top of the stack for the caller.
  __ faddp();
  __ jmp(use_heap_number);
}
// Boxes the double on top of the x87 stack (|value| is stX_0 at the call
// site) into a freshly allocated HeapNumber returned in eax. Falls back to
// the runtime when inline allocation fails; CallRuntimeSaveDoubles preserves
// the FP value across the call. Clobbers |scratch| and |scratch2|.
void ReturnAllocatedHeapNumber(MacroAssembler* masm, X87Register value,
                               Register scratch, Register scratch2) {
  Label call_runtime;
  __ AllocateHeapNumber(eax, scratch, scratch2, &call_runtime);
  // __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), value);
  // NOTE(review): fst_d stores without popping, so the double remains on the
  // x87 stack across the Ret — confirm callers expect this (fstp_d would
  // leave the FPU stack empty instead).
  __ fst_d(FieldOperand(eax, HeapNumber::kValueOffset));
  __ Ret();

  __ bind(&call_runtime);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    // __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), value);
    __ fst_d(FieldOperand(eax, HeapNumber::kValueOffset));
  }
  __ Ret();
}
} // anonymous namespace
// Generates the Atomics.load code stub: loads the element of the typed array
// in edx at the untagged word32 index in eax (see AtomicsLoadDescriptor) and
// returns it tagged — as a Smi when it fits, otherwise as a HeapNumber.
void AtomicsLoadStub::Generate(MacroAssembler* masm) {
  Register object = edx;
  Register index = eax;  // Index is an untagged word32.
  Register backing_store = ebx;
  Label table;

  // ecx/esi serve as scratch; backing_store ends up pointing at element 0.
  GetTypedArrayBackingStore(masm, backing_store, object, ecx);
  TypedArrayJumpTablePrologue(masm, object, ecx, esi, &table);

  Label i8, u8, i16, u16, i32, u32;

  // 8- and 16-bit loads always fit in a Smi.
  __ bind(&i8);
  __ mov_b(eax, Operand(backing_store, index, times_1, 0));
  __ movsx_b(eax, eax);  // Sign-extend the byte.
  __ SmiTag(eax);
  __ Ret();

  __ bind(&u8);
  __ mov_b(eax, Operand(backing_store, index, times_1, 0));
  __ movzx_b(eax, eax);  // Zero-extend the byte.
  __ SmiTag(eax);
  __ Ret();

  __ bind(&i16);
  __ mov_w(eax, Operand(backing_store, index, times_2, 0));
  __ movsx_w(eax, eax);  // Sign-extend the halfword.
  __ SmiTag(eax);
  __ Ret();

  __ bind(&u16);
  __ mov_w(eax, Operand(backing_store, index, times_2, 0));
  __ movzx_w(eax, eax);  // Zero-extend the halfword.
  __ SmiTag(eax);
  __ Ret();

  // 32-bit loads may exceed Smi range; the helpers either Ret with a Smi or
  // fall through here with the double on the x87 stack for boxing.
  Label use_heap_number;
  __ bind(&i32);
  __ mov(eax, Operand(backing_store, index, times_4, 0));
  ReturnInteger32(masm, stX_0, eax, ecx, &use_heap_number);

  __ bind(&u32);
  __ mov(eax, Operand(backing_store, index, times_4, 0));
  ReturnUnsignedInteger32(masm, stX_0, eax, stX_1, &use_heap_number);

  __ bind(&use_heap_number);
  ReturnAllocatedHeapNumber(masm, stX_0, ecx, edx);

  // Uint8ClampedArray loads exactly like Uint8Array, so its table entry
  // reuses &u8 (clamping only matters on store).
  TypedArrayJumpTableEpilogue(masm, &table, &i8, &u8, &i16, &u16, &i32, &u32,
                              &u8);
}
#undef __
......
......@@ -427,6 +427,15 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
// Registers the calling convention for the AtomicsLoadStub on this platform:
// edx carries the typed array, eax the untagged element index.
void AtomicsLoadDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {
      edx,  // the typedarray object
      eax   // the index to load (untagged)
  };
  data->InitializePlatformSpecific(arraysize(regs), regs);
}
} // namespace internal
} // namespace v8
......
......@@ -500,6 +500,23 @@ class MacroAssembler: public Assembler {
j(not_zero, not_smi_label, distance);
}
// Jump if the value cannot be represented by a smi.
// Adding 2**30 maps the valid 31-bit smi range [-2**30, 2**30 - 1] onto the
// non-negative int32s, so the sign flag set by the add is set exactly when
// |value| is out of range. |value| itself is preserved; |scratch| clobbered.
inline void JumpIfNotValidSmiValue(Register value, Register scratch,
                                   Label* on_invalid,
                                   Label::Distance distance = Label::kFar) {
  mov(scratch, value);
  add(scratch, Immediate(0x40000000U));
  j(sign, on_invalid, distance);
}
// Jump if the unsigned integer value cannot be represented by a smi.
// An unsigned value fits in a 31-bit smi iff it is below 2**30; neither
// |value| nor the flags-only cmp modifies any register.
inline void JumpIfUIntNotValidSmiValue(
    Register value, Label* on_invalid,
    Label::Distance distance = Label::kFar) {
  cmp(value, Immediate(0x40000000U));
  j(above_equal, on_invalid, distance);
}
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment