Commit 2bd8d332 authored by lrn@chromium.org

X64: Change strategy for spilling to match ia32. It's just better.

Align deferred code blocks to 16-byte address boundaries.

Review URL: http://codereview.chromium.org/2855018

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4914 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 53340b26
src/arm/assembler-arm.cc
@@ -394,6 +394,11 @@ void Assembler::Align(int m) {
 }
 
 
+void Assembler::CodeTargetAlign() {
+  Align(16);  // Tentative value.
+}
+
+
 bool Assembler::IsNop(Instr instr, int type) {
   // Check for mov rx, rx.
   ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
......
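For context, a minimal sketch of what the Align(m)/CodeTargetAlign() pair boils down to, assuming Align() pads with nops until pc_offset() reaches the requested power-of-two boundary, as V8's assemblers do. SketchAssembler, its byte buffer, and the one-byte 0x90 nop are illustrative stand-ins, not V8 classes (the real ARM nop is a 4-byte instruction).

#include <cassert>
#include <cstdio>
#include <vector>

class SketchAssembler {
 public:
  int pc_offset() const { return static_cast<int>(buffer_.size()); }

  void EmitNop() { buffer_.push_back(0x90); }  // x86 one-byte nop.

  // Pad with nops until the offset is a multiple of m.
  void Align(int m) {
    assert(m >= 4 && (m & (m - 1)) == 0);  // m must be a power of 2.
    while ((pc_offset() & (m - 1)) != 0) EmitNop();
  }

  void CodeTargetAlign() { Align(16); }  // Matches the new hook above.

 private:
  std::vector<unsigned char> buffer_;
};

int main() {
  SketchAssembler masm;
  for (int i = 0; i < 7; i++) masm.EmitNop();  // pc_offset() == 7.
  masm.CodeTargetAlign();
  std::printf("aligned pc_offset = %d\n", masm.pc_offset());  // Prints 16.
}

Padding with nops rather than raw data keeps the gap harmless if execution ever falls through into it.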
src/arm/assembler-arm.h
@@ -703,6 +703,8 @@ class Assembler : public Malloced {
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2 (>= 4).
   void Align(int m);
+  // Aligns code to something that's optimal for a jump target for the platform.
+  void CodeTargetAlign();
 
   // Branch instructions
   void b(int branch_offset, Condition cond = al);
......
src/codegen.cc
@@ -69,6 +69,7 @@ void CodeGenerator::ProcessDeferred() {
   while (!deferred_.is_empty()) {
     DeferredCode* code = deferred_.RemoveLast();
     ASSERT(masm_ == code->masm());
+    masm_->CodeTargetAlign();
     // Record position of deferred code stub.
     masm_->RecordStatementPosition(code->statement_position());
     if (code->position() != RelocInfo::kNoPosition) {
......
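The effect of the new masm_->CodeTargetAlign() call is that every deferred code stub now begins on a 16-byte boundary. A toy calculation of where consecutive stubs land; AlignUp is a hypothetical helper (not a V8 function) and the stub sizes are made up:

#include <cstdio>

// Round offset up to the next multiple of a power-of-two alignment.
static int AlignUp(int offset, int alignment) {
  return (offset + alignment - 1) & ~(alignment - 1);
}

int main() {
  // Pretend three deferred stubs of these byte sizes are emitted back to
  // back, starting right after the main code ends at offset 0x123.
  const int stub_sizes[] = {13, 40, 7};
  int pc = 0x123;
  for (int size : stub_sizes) {
    int start = AlignUp(pc, 16);  // Where CodeTargetAlign() lands us.
    std::printf("stub at 0x%x (padding %d), ends at 0x%x\n",
                start, start - pc, start + size);
    pc = start + size;
  }
}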
src/ia32/assembler-ia32.cc
@@ -378,6 +378,11 @@ void Assembler::Align(int m) {
 }
 
 
+void Assembler::CodeTargetAlign() {
+  Align(16);  // Preferred alignment of jump targets on ia32.
+}
+
+
 void Assembler::cpuid() {
   ASSERT(CpuFeatures::IsEnabled(CPUID));
   EnsureSpace ensure_space(this);
......
src/ia32/assembler-ia32.h
@@ -507,6 +507,8 @@ class Assembler : public Malloced {
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2.
   void Align(int m);
+  // Aligns code to something that's optimal for a jump target for the platform.
+  void CodeTargetAlign();
 
   // Stack
   void pushad();
......
src/x64/assembler-x64.cc
@@ -382,6 +382,11 @@ void Assembler::Align(int m) {
 }
 
 
+void Assembler::CodeTargetAlign() {
+  Align(16);  // Preferred alignment of jump targets on x64.
+}
+
+
 void Assembler::bind_to(Label* L, int pos) {
   ASSERT(!L->is_bound());  // Label may only be bound once.
   last_pc_ = NULL;
......
src/x64/assembler-x64.h
@@ -499,6 +499,8 @@ class Assembler : public Malloced {
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2.
   void Align(int m);
+  // Aligns code to something that's optimal for a jump target for the platform.
+  void CodeTargetAlign();
 
   // Stack
   void pushfq();
......
src/x64/macro-assembler-x64.cc
@@ -456,13 +456,8 @@ void MacroAssembler::Set(Register dst, int64_t x) {
 void MacroAssembler::Set(const Operand& dst, int64_t x) {
-  if (x == 0) {
-    xor_(kScratchRegister, kScratchRegister);
-    movq(dst, kScratchRegister);
-  } else if (is_int32(x)) {
+  if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
-  } else if (is_uint32(x)) {
-    movl(dst, Immediate(static_cast<uint32_t>(x)));
   } else {
     movq(kScratchRegister, x, RelocInfo::NONE);
     movq(dst, kScratchRegister);
......
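Why the two deleted branches were safe to drop: zero already satisfies is_int32, so x == 0 falls into the fast movq-with-imm32 path, and values that are only uint32 (e.g. 2^31..2^32-1) now simply take the full 64-bit path; the old xor/movl special cases saved a few code bytes, not correctness. A stand-alone sketch with illustrative reimplementations of the predicates (V8 defines its own equivalents):

#include <cstdint>
#include <cstdio>

// Stand-ins for V8's predicates, for illustration only.
static bool is_int32(int64_t x) {
  return x == static_cast<int64_t>(static_cast<int32_t>(x));
}
static bool is_uint32(int64_t x) {
  return x >= 0 && x <= UINT32_MAX;
}

int main() {
  // 0 and other small values already take the is_int32 fast path, so only
  // the uint32-but-not-int32 band pays for the removed movl shortcut.
  const int64_t samples[] = {0, -1, 1LL << 31, (1LL << 32) - 1, 1LL << 40};
  for (int64_t x : samples) {
    std::printf("%20lld: int32=%d uint32=%d -> %s\n",
                static_cast<long long>(x), is_int32(x), is_uint32(x),
                is_int32(x) ? "movq dst, imm32"
                            : "movq scratch, imm64; movq dst, scratch");
  }
}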
src/x64/virtual-frame-x64.cc
@@ -961,16 +961,18 @@ void VirtualFrame::SyncRange(int begin, int end) {
   // Sync elements below the range if they have not been materialized
   // on the stack.
   int start = Min(begin, stack_pointer_ + 1);
+  int end_or_stack_pointer = Min(stack_pointer_, end);
+  // Emit normal push instructions for elements above stack pointer
+  // and use mov instructions if we are below stack pointer.
+  int i = start;
 
-  // If positive we have to adjust the stack pointer.
-  int delta = end - stack_pointer_;
-  if (delta > 0) {
-    stack_pointer_ = end;
-    __ subq(rsp, Immediate(delta * kPointerSize));
-  }
-
-  for (int i = start; i <= end; i++) {
+  while (i <= end_or_stack_pointer) {
     if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+    i++;
   }
+
+  while (i <= end) {
+    SyncElementByPushing(i);
+    i++;
+  }
 }
......
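The rewritten SyncRange() is the "match ia32" part of this commit: elements at or below the current stack_pointer_ are stored with movs into already-materialized slots, and everything above is materialized by pushes that grow the stack as they go, instead of pre-adjusting rsp once and using movs for the whole range. A toy model of the index arithmetic, ignoring the is_synced() bookkeeping; the printfs stand in for real code emission and the names mirror the diff:

#include <algorithm>
#include <cstdio>

static void SyncRange(int begin, int end, int* stack_pointer) {
  int start = std::min(begin, *stack_pointer + 1);
  int end_or_stack_pointer = std::min(*stack_pointer, end);
  int i = start;
  // Phase 1: mov into slots that already exist on the stack.
  while (i <= end_or_stack_pointer) {
    std::printf("mov  [rsp + offset(%d)], value(%d)\n", i, i);
    i++;
  }
  // Phase 2: push the rest, extending the materialized frame one slot
  // at a time.
  while (i <= end) {
    std::printf("push value(%d)\n", i);
    (*stack_pointer)++;
    i++;
  }
}

int main() {
  int stack_pointer = 2;            // Elements 0..2 already on the stack.
  SyncRange(1, 5, &stack_pointer);  // movs for 1..2, pushes for 3..5.
  std::printf("stack_pointer_ is now %d\n", stack_pointer);  // Prints 5.
}

Pushes avoid both the separate subq of rsp and the longer rsp-relative mov encodings, which is presumably why the ia32 strategy is called "just better" in the commit message.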