A64: Handle a few TODOs.

Below are notes about some of the TODOs handled.

* In Simulator::Debug:
    We haven't had issues with printing in the debugger for a while. If the
    feature turns out to be still lacking an issue can be raised.

* In SubStringStub::Generate:
    There is no need to jump there. The StringCharAtGenerator is asked to set
    the result in x0, and the return sequence following only uses two instructions.
    Also, we would need to jump after the native counter increment instead of the
    return_x0 label.

* In ArrayPushStub::Generate:
    Raised an internal issue to check and update the ARM code.

* In MacroAssembler::CopyBytes:
    The new code should be faster for bulk copies. Raised an internal issue to
    investigate and improve this function.

* In MacroAssembler::EnterExitFrame:
    Unless we actually find something is wrong, this is working and looks ok.
    Also fixed the comments about the FP registers.

* Comment for BumpSystemStackPointer:
    Removed the comment at the declaration site and updated the TODO in the
    implementation.

R=ulan@chromium.org

Review URL: https://codereview.chromium.org/196473021

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20034 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 487ca9e3
......@@ -1824,10 +1824,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
__ Ldr(x10, MemOperand(x11));
// TODO(all): Pushing the marker twice seems unnecessary.
// In this case perhaps we could push xzr in the slot for the context
// (see MAsm::EnterFrame).
__ Push(x13, x12, x12, x10);
__ Push(x13, xzr, x12, x10);
// Set up fp.
__ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
......@@ -1873,7 +1870,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushTryHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
// TODO(jbramley): Do this in the Assembler.
__ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
}
......@@ -4070,12 +4066,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Bind(&update_instance_type);
__ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
__ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
// TODO(all): This generates "b #+0x4". Can these be optimised out?
__ B(&underlying_unpacked);
// Now control must go to &underlying_unpacked. Since the no code is generated
// before then we fall through instead of generating a useless branch.
__ Bind(&seq_or_external_string);
// Sequential or external string. Registers unpacked_string and input_string
// alias, so there's nothing to do here.
// Note that if code is added here, the above code must be updated.
// x0 result_string pointer to result string object (uninit)
// x1 result_length length of substring result
......@@ -4202,7 +4199,6 @@ void SubStringStub::Generate(MacroAssembler* masm) {
input_string, from, result_length, x0,
&runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm);
// TODO(jbramley): Why doesn't this jump to return_x0?
__ Drop(3);
__ Ret();
generator.SkipSlow(masm, &runtime);
......@@ -4439,7 +4435,6 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
Operand::UntagSmiAndScale(length, kPointerSizeLog2));
__ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
} else {
// TODO(all): ARM has a redundant cmp here.
__ B(gt, &call_builtin);
__ Peek(value, (argc - 1) * kPointerSize);
......@@ -5636,7 +5631,6 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
// TODO(all): Optimize this with stp and suchlike.
ASSERT(!AreAliased(x0, api_function_address));
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
......
......@@ -1273,9 +1273,10 @@ void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
ASSERT(!csp.Is(sp_));
// TODO(jbramley): Several callers rely on this not using scratch registers,
// so we use the assembler directly here. However, this means that large
// immediate values of 'space' cannot be handled cleanly. Once we implement
// our flexible scratch register idea, we could greatly simplify this
// function.
// immediate values of 'space' cannot be handled cleanly. (Only 24-bits
// immediates or values of 'space' that can be encoded in one instruction are
// accepted.) Once we implement our flexible scratch register idea, we could
// greatly simplify this function.
InstructionAccurateScope scope(this);
if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
// The subtract instruction supports a 12-bit immediate, shifted left by
......
......@@ -737,7 +737,6 @@ void MacroAssembler::Abs(const Register& rd, const Register& rm,
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3) {
ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
ASSERT(src0.IsValid());
int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
int size = src0.SizeInBytes();
......@@ -747,6 +746,21 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
}
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3,
const CPURegister& src4, const CPURegister& src5,
const CPURegister& src6, const CPURegister& src7) {
ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
int count = 5 + src5.IsValid() + src6.IsValid() + src6.IsValid();
int size = src0.SizeInBytes();
PrepareForPush(count, size);
PushHelper(4, size, src0, src1, src2, src3);
PushHelper(count - 4, size, src4, src5, src6, src7);
}
void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3) {
// It is not valid to pop into the same register more than once in one
......@@ -2442,10 +2456,12 @@ void MacroAssembler::CopyBytes(Register dst,
Register length,
Register scratch,
CopyHint hint) {
ASSERT(!AreAliased(src, dst, length, scratch));
UseScratchRegisterScope temps(this);
Register tmp1 = temps.AcquireX();
Register tmp2 = temps.AcquireX();
ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
ASSERT(!AreAliased(src, dst, csp));
// TODO(all): Implement a faster copy function, and use hint to determine
// which algorithm to use for copies.
if (emit_debug_code()) {
// Check copy length.
Cmp(length, 0);
......@@ -2459,14 +2475,33 @@ void MacroAssembler::CopyBytes(Register dst,
Assert(le, kCopyBuffersOverlap);
}
Label loop, done;
Cbz(length, &done);
Label short_copy, short_loop, bulk_loop, done;
Bind(&loop);
if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
Register bulk_length = scratch;
int pair_size = 2 * kXRegSize;
int pair_mask = pair_size - 1;
Bic(bulk_length, length, pair_mask);
Cbz(bulk_length, &short_copy);
Bind(&bulk_loop);
Sub(bulk_length, bulk_length, pair_size);
Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
Cbnz(bulk_length, &bulk_loop);
And(length, length, pair_mask);
}
Bind(&short_copy);
Cbz(length, &done);
Bind(&short_loop);
Sub(length, length, 1);
Ldrb(scratch, MemOperand(src, 1, PostIndex));
Strb(scratch, MemOperand(dst, 1, PostIndex));
Cbnz(length, &loop);
Ldrb(tmp1, MemOperand(src, 1, PostIndex));
Strb(tmp1, MemOperand(dst, 1, PostIndex));
Cbnz(length, &short_loop);
Bind(&done);
}
......@@ -2945,7 +2980,6 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
}
// TODO(jbramley): Check that we're handling the frame pointer correctly.
void MacroAssembler::EnterExitFrame(bool save_doubles,
const Register& scratch,
int extra_space) {
......@@ -2989,7 +3023,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
// jssp[-16 - fp_size]: Saved doubles (if save_doubles is true).
// fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
// jssp[8]: Extra space reserved for caller (if extra_space != 0).
// jssp -> jssp[0]: Space reserved for the return address.
......@@ -3001,7 +3035,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
// csp[...]: Saved doubles, if saved_doubles is true.
// fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
// csp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
// csp -> csp[0]: Space reserved for the return address.
......
......@@ -538,6 +538,10 @@ class MacroAssembler : public Assembler {
// the system stack pointer, these methods do not modify any other registers.
void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
void Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3,
const CPURegister& src4, const CPURegister& src5 = NoReg,
const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
......@@ -791,10 +795,6 @@ class MacroAssembler : public Assembler {
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
//
// TODO(jbramley): Currently, this method can only accept values of 'space'
// that can be encoded in one instruction. Refer to the implementation for
// details.
inline void BumpSystemStackPointer(const Operand& space);
// Helpers ------------------------------------------------------------------
......
......@@ -304,12 +304,14 @@ void Simulator::CorruptAllCallerSavedCPURegisters() {
// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
// TODO(all): Insert a marker in the extra space allocated on the stack.
uintptr_t Simulator::PushAddress(uintptr_t address) {
ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
intptr_t new_sp = sp() - 2 * kXRegSize;
uintptr_t* alignment_slot =
reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
memcpy(alignment_slot, &kSlotsZapValue, kPointerSize);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
*stack_slot = address;
memcpy(stack_slot, &address, kPointerSize);
set_sp(new_sp);
return new_sp;
}
......@@ -442,12 +444,6 @@ void Simulator::RunFrom(Instruction* start) {
}
void Simulator::CheckStackAlignment() {
// TODO(aleram): The sp alignment check to perform depends on the processor
// state. Check the specifications for more details.
}
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
......@@ -1721,11 +1717,6 @@ uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
void Simulator::VisitDataProcessing2Source(Instruction* instr) {
// TODO(mcapewel) move these to a higher level file, as they are global
// assumptions.
ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
Shift shift_op = NO_SHIFT;
int64_t result = 0;
switch (instr->Mask(DataProcessing2SourceMask)) {
......@@ -3128,7 +3119,6 @@ void Simulator::Debug() {
} else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
if (argc == 2) {
if (strcmp(arg1, "all") == 0) {
// TODO(all): better support for printing in the debugger.
PrintRegisters(true);
PrintFPRegisters(true);
} else {
......
......@@ -929,16 +929,15 @@ Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
Label miss;
Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
// TODO(jbramely): HandlerFrontendHeader returns its result in scratch1(), so
// we can't use it below, but that isn't very obvious. Is there a better way
// of handling this?
// HandlerFrontendHeader can return its result into scratch1() so do not
// use it.
Register scratch2 = this->scratch2();
Register scratch3 = this->scratch3();
Register dictionary = this->scratch4();
ASSERT(!AreAliased(reg, scratch2, scratch3, dictionary));
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!AreAliased(reg, scratch2(), scratch3(), scratch4()));
// Load the properties dictionary.
Register dictionary = scratch4();
__ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
// Probe the dictionary.
......@@ -948,18 +947,18 @@ Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
&probe_done,
dictionary,
this->name(),
scratch2(),
scratch3());
scratch2,
scratch3);
__ Bind(&probe_done);
// If probing finds an entry in the dictionary, scratch3 contains the
// pointer into the dictionary. Check that the value is the callback.
Register pointer = scratch3();
Register pointer = scratch3;
const int kElementsStartOffset = NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ Ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
__ Cmp(scratch2(), Operand(callback));
__ Ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
__ Cmp(scratch2, Operand(callback));
__ B(ne, &miss);
}
......@@ -1141,12 +1140,15 @@ void LoadStubCompiler::GenerateLoadInterceptor(
void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
Label success;
UseScratchRegisterScope temps(masm());
// Check that the object is a boolean.
// TODO(all): Optimize this like LCodeGen::DoDeferredTaggedToI.
__ JumpIfRoot(object, Heap::kTrueValueRootIndex, &success);
__ JumpIfNotRoot(object, Heap::kFalseValueRootIndex, miss);
__ Bind(&success);
Register true_root = temps.AcquireX();
Register false_root = temps.AcquireX();
ASSERT(!AreAliased(object, true_root, false_root));
__ LoadTrueFalseRoots(true_root, false_root);
__ Cmp(object, true_root);
__ Ccmp(object, false_root, ZFlag, ne);
__ B(ne, miss);
}
......@@ -1162,12 +1164,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// Stub never generated for non-global objects that require access checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
// TODO(jbramley): Make Push take more than four arguments and combine these
// two calls.
__ Push(receiver(), holder_reg);
// receiver() and holder_reg can alias.
ASSERT(!AreAliased(receiver(), scratch1(), scratch2(), value()));
ASSERT(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
__ Mov(scratch1(), Operand(callback));
__ Mov(scratch2(), Operand(name));
__ Push(scratch1(), scratch2(), value());
__ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
......
......@@ -41,6 +41,10 @@ R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
namespace v8 {
namespace internal {
// These are global assumptions in v8.
STATIC_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
STATIC_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
// Floating point representation.
static inline uint32_t float_to_rawbits(float value) {
uint32_t bits = 0;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment