Commit 89e9819c authored by mbrandy, committed by Commit bot

PPC: [Atomics] code stubs for atomic operations

Port 5e9ddf6c

Original commit message:
    * New atomic code stubs for x64, ia32, arm, arm64
    * Add convenience functions JumpIfNotValidSmiValue, JumpIfUintNotValidSmiValue
      to macro-assembler-ia32 (API based on x64 macro assembler)
    * Remove runtime implementation of Atomics.load, the code stub should always be
      called instead
    * Add new test to mjsunit atomics test; check that Smi values of different
      sizes are supported when possible, else fall back to HeapNumbers

    These changes were needed to add another code stub:
    * Bump kStubMajorKeyBits from 7 to 8
    * Reduce ScriptContextFieldStub::kSlotIndexBits from 13 to 12
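
The Smi-size behaviour called out above (return a Smi when the loaded value fits, otherwise fall back to a HeapNumber) comes down to a simple range check, implemented in the PPC port below by ReturnInteger32 and ReturnUnsignedInteger32. A minimal sketch of that check, assuming the usual 31-bit Smi payload on 32-bit targets; the constant and function names here are illustrative, not V8's:

#include <cstdint>

// Illustrative only: on 32-bit targets a Smi carries a 31-bit signed payload
// (one bit is the tag), so some int32/uint32 element values cannot be
// returned as Smis and must be boxed as HeapNumbers instead.
constexpr int32_t kIllustrativeSmiMax = (1 << 30) - 1;
constexpr int32_t kIllustrativeSmiMin = -(1 << 30);

inline bool FitsInSmi32(int64_t value) {
  return value >= kIllustrativeSmiMin && value <= kIllustrativeSmiMax;
}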

R=binji@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com, bjaideep@ca.ibm.com
BUG=v8:4614
LOG=N

Review URL: https://codereview.chromium.org/1887823002

Cr-Commit-Position: refs/heads/master@{#35462}
parent d7bb46d4
@@ -5788,6 +5788,187 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
                           kStackUnwindSpace, NULL, return_value_operand, NULL);
}

namespace {

// Computes the address of the typed array's first element: the buffer's
// backing store pointer plus the view's byte offset, which may be stored
// as a Smi or as a HeapNumber.
void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store,
                               Register object, Register scratch,
                               DoubleRegister double_scratch) {
  Label offset_is_not_smi, done_offset;
  __ LoadP(scratch, FieldMemOperand(object, JSTypedArray::kBufferOffset));
  __ LoadP(backing_store,
           FieldMemOperand(scratch, JSArrayBuffer::kBackingStoreOffset));
  __ LoadP(scratch,
           FieldMemOperand(object, JSArrayBufferView::kByteOffsetOffset));
  __ JumpIfNotSmi(scratch, &offset_is_not_smi);
  // Offset is a Smi.
  __ SmiUntag(scratch);
  __ b(&done_offset);

  // Offset is a HeapNumber.
  __ bind(&offset_is_not_smi);
  __ lfd(double_scratch, FieldMemOperand(scratch, HeapNumber::kValueOffset));
  __ ConvertDoubleToInt64(double_scratch,
#if !V8_TARGET_ARCH_PPC64
                          r0,
#endif
                          scratch, double_scratch);

  __ bind(&done_offset);
  __ add(backing_store, backing_store, scratch);
}

// Loads the typed array's element kind from its elements map and jumps
// through the per-kind dispatch table emitted by TypedArrayJumpTableEpilogue.
void TypedArrayJumpTablePrologue(MacroAssembler* masm, Register object,
                                 Register scratch, Register scratch2,
                                 Label* table) {
  __ LoadP(scratch, FieldMemOperand(object, JSObject::kElementsOffset));
  __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
  __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  __ subi(scratch, scratch,
          Operand(static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE)));
  if (__ emit_debug_code()) {
    __ cmpi(scratch, Operand::Zero());
    __ Check(ge, kOffsetOutOfRange);
  }
  __ ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2));
  __ mov_label_addr(scratch2, table);
  __ LoadPX(scratch, MemOperand(scratch2, scratch));
  __ Jump(scratch);
}

void TypedArrayJumpTableEpilogue(MacroAssembler* masm, Label* table, Label* i8,
                                 Label* u8, Label* i16, Label* u16, Label* i32,
                                 Label* u32, Label* u8c) {
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5);
  STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6);
  STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7);
  STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8);

  Label abort;
  __ bind(table);
  __ emit_label_addr(i8);      // Int8Array
  __ emit_label_addr(u8);      // Uint8Array
  __ emit_label_addr(i16);     // Int16Array
  __ emit_label_addr(u16);     // Uint16Array
  __ emit_label_addr(i32);     // Int32Array
  __ emit_label_addr(u32);     // Uint32Array
  __ emit_label_addr(&abort);  // Float32Array
  __ emit_label_addr(&abort);  // Float64Array
  __ emit_label_addr(u8c);     // Uint8ClampedArray

  __ bind(&abort);
  __ Abort(kNoReason);
}

#if !V8_TARGET_ARCH_PPC64
// Returns a signed 32-bit value as a Smi when it fits in the Smi range;
// otherwise converts it to a double and branches to the HeapNumber path.
void ReturnInteger32(MacroAssembler* masm, DoubleRegister dst, Register value,
                     Label* use_heap_number) {
  Label not_smi;
  __ JumpIfNotSmiCandidate(value, r0, &not_smi);
  __ SmiTag(r3, value);
  __ blr();

  __ bind(&not_smi);
  __ ConvertIntToDouble(value, dst);
  __ b(use_heap_number);
}
#endif

// Returns an unsigned 32-bit value as a Smi when it fits in the Smi range;
// otherwise converts it to a double and branches to the HeapNumber path.
void ReturnUnsignedInteger32(MacroAssembler* masm, DoubleRegister dst,
                             Register value, Label* use_heap_number) {
  Label not_smi;
  __ JumpIfNotUnsignedSmiCandidate(value, r0, &not_smi);
  __ SmiTag(r3, value);
  __ blr();

  __ bind(&not_smi);
  __ ConvertUnsignedIntToDouble(value, dst);
  __ b(use_heap_number);
}

// Boxes the double in a freshly allocated HeapNumber, calling into the
// runtime if inline allocation fails.
void ReturnAllocatedHeapNumber(MacroAssembler* masm, DoubleRegister value,
                               Register scratch, Register scratch2,
                               Register scratch3) {
  Label call_runtime;
  __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(r3, scratch, scratch2, scratch3, &call_runtime);
  __ stfd(value, FieldMemOperand(r3, HeapNumber::kValueOffset));
  __ blr();

  __ bind(&call_runtime);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    __ stfd(value, FieldMemOperand(r3, HeapNumber::kValueOffset));
  }
  __ blr();
}

}  // anonymous namespace

void AtomicsLoadStub::Generate(MacroAssembler* masm) {
  Register object = r4;
  Register index = r3;  // Index is an untagged word32.
  Register backing_store = r5;
  Label table, i8, u8, i16, u16, i32, u32;

  GetTypedArrayBackingStore(masm, backing_store, object, r6, d0);
  TypedArrayJumpTablePrologue(masm, object, r6, r7, &table);

  __ bind(&i8);
  __ lbzx(r3, MemOperand(backing_store, index));
  __ lwsync();
  __ extsb(r3, r3);
  __ SmiTag(r3);
  __ blr();

  __ bind(&u8);
  __ lbzx(r3, MemOperand(backing_store, index));
  __ lwsync();
  __ SmiTag(r3);
  __ blr();

  __ bind(&i16);
  __ ShiftLeftImm(index, index, Operand(1));
  __ lhax(r3, MemOperand(backing_store, index));
  __ lwsync();
  __ SmiTag(r3);
  __ blr();

  __ bind(&u16);
  __ ShiftLeftImm(index, index, Operand(1));
  __ lhzx(r3, MemOperand(backing_store, index));
  __ lwsync();
  __ SmiTag(r3);
  __ blr();

  Label use_heap_number;

  __ bind(&i32);
  __ ShiftLeftImm(index, index, Operand(2));
  __ lwax(r3, MemOperand(backing_store, index));
  __ lwsync();
#if V8_TARGET_ARCH_PPC64
  __ SmiTag(r3);
  __ blr();
#else
  ReturnInteger32(masm, d0, r3, &use_heap_number);
#endif

  __ bind(&u32);
  __ ShiftLeftImm(index, index, Operand(2));
  __ lwzx(r3, MemOperand(backing_store, index));
  __ lwsync();
  ReturnUnsignedInteger32(masm, d0, r3, &use_heap_number);

  __ bind(&use_heap_number);
  ReturnAllocatedHeapNumber(masm, d0, r4, r5, r6);

  TypedArrayJumpTableEpilogue(masm, &table, &i8, &u8, &i16, &u16, &i32, &u32,
                              &u8);
}

#undef __

}  // namespace internal

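For orientation only (this is not part of the patch), the dispatch and widening performed by AtomicsLoadStub::Generate above can be summarized in portable C++. The ElementKind enum and LoadElement helper are invented for this sketch; it ignores Smi tagging, the HeapNumber fallback, and the lwsync barrier that gives the load its acquire ordering:

#include <cstdint>
#include <cstring>

// Stand-ins for the FIXED_*_ARRAY_TYPE instance types the jump table keys on.
enum class ElementKind {
  kInt8, kUint8, kUint8Clamped, kInt16, kUint16, kInt32, kUint32
};

// Reads one element and widens it the way the stub does (lbzx/extsb,
// lhax/lhzx, lwax/lwzx): sign-extend the signed kinds, zero-extend the rest.
int64_t LoadElement(const uint8_t* backing_store, uint32_t index,
                    ElementKind kind) {
  switch (kind) {
    case ElementKind::kInt8:
      return static_cast<int8_t>(backing_store[index]);
    case ElementKind::kUint8:
    case ElementKind::kUint8Clamped:
      return backing_store[index];
    case ElementKind::kInt16: {
      int16_t v;
      std::memcpy(&v, backing_store + index * 2, sizeof(v));
      return v;
    }
    case ElementKind::kUint16: {
      uint16_t v;
      std::memcpy(&v, backing_store + index * 2, sizeof(v));
      return v;
    }
    case ElementKind::kInt32: {
      int32_t v;
      std::memcpy(&v, backing_store + index * 4, sizeof(v));
      return v;
    }
    case ElementKind::kUint32: {
      uint32_t v;
      std::memcpy(&v, backing_store + index * 4, sizeof(v));
      return v;
    }
  }
  return 0;  // Unreachable for valid kinds.
}
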
@@ -420,6 +420,15 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
  };
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

void AtomicsLoadDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      r4,  // the typedarray object
      r3   // the index to load (untagged)
  };
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
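
Note that the descriptor's register assignments mirror what AtomicsLoadStub::Generate above expects as inputs: the typed array object arrives in r4 and the untagged element index in r3, which is also the register the stub writes its result into before returning.
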
}  // namespace internal
}  // namespace v8