Commit b8691a78 authored by erik.corry@gmail.com

Clean up multi-byte NOP support on x64 to more closely match IA32.

Fix a missing instruction in the disassembler.
Fix wrong disassembly of multi-byte NOPs on x64.
Add a test of the disassembler on 64-bit.
Review URL: http://codereview.chromium.org/8773039

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10147 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 5ae1ddd7
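
Background, illustrative only (none of the code below is part of the patch): x86/x64 alignment padding is best emitted as a few long NOP instructions rather than many single-byte 0x90s. The following standalone sketch, assuming the Intel-recommended encodings for 1-9 byte NOPs and a simple longest-first split, shows the general idea. The patch's Nop() decomposes slightly differently (it loops, prepending 0x66 prefixes to the 8-byte form for counts of 9 to 11), but the goal is the same: the fewest instructions for a given pad.

  #include <cstdio>
  #include <vector>

  // Encodings from the Intel 64 and IA-32 SDM, indexed by length 1..9.
  static const unsigned char kNops[10][9] = {
    {},
    {0x90},
    {0x66, 0x90},
    {0x0F, 0x1F, 0x00},
    {0x0F, 0x1F, 0x40, 0x00},
    {0x0F, 0x1F, 0x44, 0x00, 0x00},
    {0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00},
    {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},
    {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    {0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
  };

  // Split a pad of n bytes into recommended NOPs, longest first.
  static void EmitPadding(std::vector<unsigned char>* out, int n) {
    while (n > 0) {
      int chunk = n > 9 ? 9 : n;
      out->insert(out->end(), kNops[chunk], kNops[chunk] + chunk);
      n -= chunk;
    }
  }

  int main() {
    std::vector<unsigned char> code;
    EmitPadding(&code, 12);  // 12 bytes of padding: a 9-byte NOP plus a 3-byte NOP.
    for (unsigned char b : code) std::printf("%02x ", b);
    std::printf("\n");
    return 0;
  }
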
...@@ -426,13 +426,7 @@ void Assembler::GetCode(CodeDesc* desc) { ...@@ -426,13 +426,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) { void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m)); ASSERT(IsPowerOf2(m));
int delta = (m - (pc_offset() & (m - 1))) & (m - 1); int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
while (delta >= 9) { Nop(delta);
nop(9);
delta -= 9;
}
if (delta > 0) {
nop(delta);
}
} }
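
Illustrative note (not part of the patch): with this change, Align(16) at pc_offset() == 13 computes delta == 3 and Nop(3) emits a single three-byte NOP (0F 1F 00). The old delta > 0 guard is unnecessary because Nop(0) emits nothing; the rewritten Nop() shown below only runs its emit loop while n > 0.
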
@@ -441,6 +435,15 @@ void Assembler::CodeTargetAlign() {
 }
 
 
+bool Assembler::IsNop(Address addr) {
+  Address a = addr;
+  while (*a == 0x66) a++;
+  if (*a == 0x90) return true;
+  if (a[0] == 0xf && a[1] == 0x1f) return true;
+  return false;
+}
+
+
 void Assembler::bind_to(Label* L, int pos) {
   ASSERT(!L->is_bound());  // Label may only be bound once.
   ASSERT(0 <= pos && pos <= pc_offset());  // Position must be valid.
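
A few worked inputs for the new predicate (illustrative): 90 returns true (plain nop); 66 66 90 returns true (the 0x66 prefixes are skipped, then 0x90 matches); 0F 1F 44 00 00 and 66 0F 1F 44 00 00 return true (the 0F 1F long-NOP opcode matches after any prefixes); CC (int3) returns false.
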
@@ -1763,7 +1766,7 @@ void Assembler::notl(Register dst) {
 }
 
 
-void Assembler::nop(int n) {
+void Assembler::Nop(int n) {
   // The recommended muti-byte sequences of NOP instructions from the Intel 64
   // and IA-32 Architectures Software Developer's Manual.
   //
@@ -1778,73 +1781,64 @@ void Assembler::nop(int n) {
   // 9 bytes  66 NOP DWORD ptr [EAX + EAX*1 +    66 0F 1F 84 00 00 00 00
   //          00000000H]                         00H
-  ASSERT(1 <= n);
-  ASSERT(n <= 9);
   EnsureSpace ensure_space(this);
-  switch (n) {
-    case 1:
-      emit(0x90);
-      return;
-    case 2:
-      emit(0x66);
-      emit(0x90);
-      return;
-    case 3:
-      emit(0x0f);
-      emit(0x1f);
-      emit(0x00);
-      return;
-    case 4:
-      emit(0x0f);
-      emit(0x1f);
-      emit(0x40);
-      emit(0x00);
-      return;
-    case 5:
-      emit(0x0f);
-      emit(0x1f);
-      emit(0x44);
-      emit(0x00);
-      emit(0x00);
-      return;
-    case 6:
-      emit(0x66);
-      emit(0x0f);
-      emit(0x1f);
-      emit(0x44);
-      emit(0x00);
-      emit(0x00);
-      return;
-    case 7:
-      emit(0x0f);
-      emit(0x1f);
-      emit(0x80);
-      emit(0x00);
-      emit(0x00);
-      emit(0x00);
-      emit(0x00);
-      return;
-    case 8:
-      emit(0x0f);
-      emit(0x1f);
-      emit(0x84);
-      emit(0x00);
-      emit(0x00);
-      emit(0x00);
-      emit(0x00);
-      emit(0x00);
-      return;
-    case 9:
-      emit(0x66);
-      emit(0x0f);
-      emit(0x1f);
-      emit(0x84);
-      emit(0x00);
-      emit(0x00);
-      emit(0x00);
-      emit(0x00);
-      emit(0x00);
-      return;
+  while (n > 0) {
+    switch (n) {
+      case 2:
+        emit(0x66);
+      case 1:
+        emit(0x90);
+        return;
+      case 3:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x00);
+        return;
+      case 4:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x40);
+        emit(0x00);
+        return;
+      case 6:
+        emit(0x66);
+      case 5:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x44);
+        emit(0x00);
+        emit(0x00);
+        return;
+      case 7:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x80);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        return;
+      default:
+      case 11:
+        emit(0x66);
+        n--;
+      case 10:
+        emit(0x66);
+        n--;
+      case 9:
+        emit(0x66);
+        n--;
+      case 8:
+        emit(0x0f);
+        emit(0x1f);
+        emit(0x84);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        emit(0x00);
+        n -= 8;
+    }
   }
 }
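
Illustrative note (not part of the patch): the rewritten Nop() relies on deliberate switch fall-through. Case 2 falls into case 1, case 6 into case 5, and the default/11/10/9 labels each add one 0x66 prefix before falling into the 8-byte encoding. For example, Nop(12) takes the default branch, emits 66 66 66 from cases 11/10/9 (n drops to 9), emits the 8-byte sequence 0F 1F 84 00 00 00 00 00 from case 8 (n drops to 1), then loops once more and finishes with 90 from case 1, for 12 bytes in two instructions.
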
......
@@ -636,6 +636,7 @@ class Assembler : public AssemblerBase {
   // possible to align the pc offset to a multiple
   // of m, where m must be a power of 2.
   void Align(int m);
+  void Nop(int bytes = 1);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
@@ -1154,7 +1155,6 @@ class Assembler : public AssemblerBase {
   void hlt();
   void int3();
   void nop();
-  void nop(int n);
   void rdtsc();
   void ret(int imm16);
   void setcc(Condition cc, Register reg);
@@ -1398,7 +1398,7 @@ class Assembler : public AssemblerBase {
     return static_cast<int>(reloc_info_writer.pos() - pc_);
   }
 
-  static bool IsNop(Address addr) { return *addr == 0x90; }
+  static bool IsNop(Address addr);
 
   // Avoid overflows for displacements etc.
   static const int kMaximalBufferSize = 512*MB;
......
@@ -264,9 +264,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
   Label check_codesize;
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
-  for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
-    __ nop();
-  }
+  __ Nop(Assembler::kDebugBreakSlotLength);
   ASSERT_EQ(Assembler::kDebugBreakSlotLength,
             masm->SizeOfCodeGeneratedSince(&check_codesize));
 }
......
@@ -138,8 +138,8 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
   ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
          *(call_target_address - 2) == 0x07 &&  // offset
          *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x90;  // nop
-  *(call_target_address - 2) = 0x90;  // nop
+  *(call_target_address - 3) = 0x66;  // 2 byte nop part 1
+  *(call_target_address - 2) = 0x90;  // 2 byte nop part 2
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
@@ -157,8 +157,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
          Assembler::target_address_at(call_target_address));
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
-         *(call_target_address - 2) == 0x90 &&  // nop
+  ASSERT(*(call_target_address - 3) == 0x66 &&  // 2 byte nop part 1
+         *(call_target_address - 2) == 0x90 &&  // 2 byte nop part 2
          *(call_target_address - 1) == 0xe8);   // call
   *(call_target_address - 3) = 0x73;  // jae
   *(call_target_address - 2) = 0x07;  // offset
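
Illustrative byte layout (assuming the surrounding code is unchanged): the stack check site ends in 73 07 E8 xx xx xx xx, that is the jae opcode, its offset, and the call. PatchStackCheckCodeAt used to overwrite the jae with 90 90, two one-byte nops; it now writes 66 90, a single two-byte nop, which matches the IA32 port and one of the forms recognized by the new Assembler::IsNop. RevertStackCheckCodeAt's assertion is updated to expect the same two bytes before restoring 73 07.
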
......
@@ -109,6 +109,7 @@ static const ByteMnemonic zero_operands_instr[] = {
   { 0xC3, UNSET_OP_ORDER, "ret" },
   { 0xC9, UNSET_OP_ORDER, "leave" },
   { 0xF4, UNSET_OP_ORDER, "hlt" },
+  { 0xFC, UNSET_OP_ORDER, "cld" },
   { 0xCC, UNSET_OP_ORDER, "int3" },
   { 0x60, UNSET_OP_ORDER, "pushad" },
   { 0x61, UNSET_OP_ORDER, "popad" },
@@ -1034,7 +1035,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
     }
   } else {
     get_modrm(*current, &mod, &regop, &rm);
-    if (opcode == 0x28) {
+    if (opcode == 0x1f) {
+      current++;
+      if (rm == 4) {  // SIB byte present.
+        current++;
+      }
+      if (mod == 1) {  // Byte displacement.
+        current += 1;
+      } else if (mod == 2) {  // 32-bit displacement.
+        current += 4;
+      }  // else no immediate displacement.
+      AppendToBuffer("nop");
+    } else if (opcode == 0x28) {
       AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
       current += PrintRightXMMOperand(current);
     } else if (opcode == 0x29) {
@@ -1178,7 +1190,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
     int mod, regop, rm;
     get_modrm(*current, &mod, &regop, &rm);
     current++;
-    if (regop == 4) {  // SIB byte present.
+    if (rm == 4) {  // SIB byte present.
       current++;
     }
     if (mod == 1) {  // Byte displacement.
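
The displacement logic shared by the new 0x1f branch and the fixed SIB check can be summarized in a small standalone sketch (hypothetical helper, not V8 code): given a pointer to the ModRM byte of a 0F 1F long NOP, it returns how many bytes follow the two-byte opcode.

  #include <cassert>

  // Hypothetical helper, not V8 code: length of the bytes after the 0F 1F opcode.
  static int NopBodyLength(const unsigned char* modrm_ptr) {
    int modrm = modrm_ptr[0];
    int mod = modrm >> 6;
    int rm = modrm & 7;
    int len = 1;                   // The ModRM byte itself.
    if (rm == 4) len += 1;         // SIB byte present.
    if (mod == 1) len += 1;        // 8-bit displacement.
    else if (mod == 2) len += 4;   // 32-bit displacement.
    return len;
  }

  int main() {
    // 0F 1F 84 00 00 00 00 00: ModRM 0x84 has mod == 2 and rm == 4,
    // so the body is 1 (ModRM) + 1 (SIB) + 4 (disp32) = 6 bytes, 8 in total.
    static const unsigned char kEightByteNop[] =
        {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00};
    assert(2 + NopBodyLength(kEightByteNop + 2) == 8);
    return 0;
  }

The second hunk's one-character fix matters here: for 0F 1F the reg field of ModRM is 0, so the old regop == 4 test never detected the SIB byte and presumably mis-measured such NOPs; rm == 4 is the correct condition for a SIB byte.
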
......
@@ -4201,11 +4201,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
   int current_pc = masm()->pc_offset();
   if (current_pc < last_lazy_deopt_pc_ + space_needed) {
     int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-    while (padding_size > 0) {
-      int nop_size = padding_size > 9 ? 9 : padding_size;
-      __ nop(nop_size);
-      padding_size -= nop_size;
-    }
+    __ Nop(padding_size);
   }
 }
......
@@ -110,7 +110,8 @@ SOURCES = {
   ],
   'arch:x64': ['test-assembler-x64.cc',
                'test-macro-assembler-x64.cc',
-               'test-log-stack-tracer.cc'],
+               'test-log-stack-tracer.cc',
+               'test-disasm-x64.cc'],
   'arch:mips': ['test-assembler-mips.cc',
                 'test-disasm-mips.cc'],
   'os:linux': ['test-platform-linux.cc'],
......
@@ -36,6 +36,7 @@
 #include "cctest.h"
 
 using v8::internal::Assembler;
+using v8::internal::Code;
 using v8::internal::CodeDesc;
 using v8::internal::FUNCTION_CAST;
 using v8::internal::Immediate;
@@ -53,6 +54,7 @@ using v8::internal::r15;
 using v8::internal::r8;
 using v8::internal::r9;
 using v8::internal::rax;
+using v8::internal::rbx;
 using v8::internal::rbp;
 using v8::internal::rcx;
 using v8::internal::rdi;
@@ -86,6 +88,16 @@ static const v8::internal::Register arg2 = rsi;
 
 #define __ assm.
 
+static v8::Persistent<v8::Context> env;
+
+
+static void InitializeVM() {
+  if (env.IsEmpty()) {
+    env = v8::Context::New();
+  }
+}
+
+
 TEST(AssemblerX64ReturnOperation) {
   OS::Setup();
   // Allocate an executable page of memory.
@@ -359,4 +371,73 @@ TEST(AssemblerX64LabelChaining) {
   __ nop();
 }
 
+
+TEST(AssemblerMultiByteNop) {
+  InitializeVM();
+  v8::HandleScope scope;
+  v8::internal::byte buffer[1024];
+  Assembler assm(Isolate::Current(), buffer, sizeof(buffer));
+  __ push(rbx);
+  __ push(rcx);
+  __ push(rdx);
+  __ push(rdi);
+  __ push(rsi);
+  __ movq(rax, Immediate(1));
+  __ movq(rbx, Immediate(2));
+  __ movq(rcx, Immediate(3));
+  __ movq(rdx, Immediate(4));
+  __ movq(rdi, Immediate(5));
+  __ movq(rsi, Immediate(6));
+  for (int i = 0; i < 16; i++) {
+    int before = assm.pc_offset();
+    __ Nop(i);
+    CHECK_EQ(assm.pc_offset() - before, i);
+  }
+  Label fail;
+  __ cmpq(rax, Immediate(1));
+  __ j(not_equal, &fail);
+  __ cmpq(rbx, Immediate(2));
+  __ j(not_equal, &fail);
+  __ cmpq(rcx, Immediate(3));
+  __ j(not_equal, &fail);
+  __ cmpq(rdx, Immediate(4));
+  __ j(not_equal, &fail);
+  __ cmpq(rdi, Immediate(5));
+  __ j(not_equal, &fail);
+  __ cmpq(rsi, Immediate(6));
+  __ j(not_equal, &fail);
+  __ movq(rax, Immediate(42));
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(rdx);
+  __ pop(rcx);
+  __ pop(rbx);
+  __ ret(0);
+  __ bind(&fail);
+  __ movq(rax, Immediate(13));
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(rdx);
+  __ pop(rcx);
+  __ pop(rbx);
+  __ ret(0);
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Code* code = Code::cast(HEAP->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      v8::internal::Handle<v8::internal::Object>(
+          HEAP->undefined_value()))->ToObjectChecked());
+  CHECK(code->IsCode());
+
+  F0 f = FUNCTION_CAST<F0>(code->entry());
+  int res = f();
+  CHECK_EQ(42, res);
+}
+
+
 #undef __
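
Usage note (assumption, not stated in the patch): the new test should be runnable through the cctest driver, presumably with an invocation along the lines of cctest test-assembler-x64/AssemblerMultiByteNop, depending on how the build names the binary; the SConscript change above also registers test-disasm-x64.cc so the disassembler test runs on x64 builds.
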