Commit a9f5f3d6 authored by erik.corry@gmail.com

Fix the full compiler on ARM to always generate the same code

regardless of the detected CPU.  This is a requirement for the
debugger and the deoptimizer, which both expect that code from
the snapshot (compiled without VFP and ARMv7) should have the
same layout as code compiled later.

This is another change to make snapshots more robust with
arbitrary code.

Review URL: https://chromiumcodereview.appspot.com/10824235

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12287 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f9aea9fc
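
In outline, the patch threads a single `predictable_code_size_` bit through the ARM assembler and makes every CPU-feature-dependent code path consult it. A minimal stand-alone sketch of that pattern (the classes below are simplified stand-ins for the real V8 types, not the actual implementation):

```cpp
#include <cstdio>

// Stand-in for V8's CpuFeatures: pretend the host CPU supports ARMv7.
enum CpuFeature { ARMv7 };
struct CpuFeatures {
  static bool IsSupported(CpuFeature) { return true; }
};

class Assembler {
 public:
  // Mirrors the accessor/mutator pair added by this commit.
  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
  bool predictable_code_size() const { return predictable_code_size_; }

  // Emits one ARMv7 ubfx, or a portable two-instruction fallback. With
  // predictable code size requested, the fallback is chosen even on an
  // ARMv7 CPU, so the emitted size never depends on feature detection.
  void Ubfx() {
    if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
      std::printf("ubfx      ; 1 instruction\n");
    } else {
      std::printf("and; lsr  ; 2 instructions, same on every CPU\n");
    }
  }

 private:
  bool predictable_code_size_ = false;
};

int main() {
  Assembler masm;
  masm.Ubfx();                           // free to use the ARMv7 encoding
  masm.set_predictable_code_size(true);
  masm.Ubfx();                           // must match the snapshot layout
  return 0;
}
```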
@@ -302,7 +302,8 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
     : AssemblerBase(arg_isolate),
       recorded_ast_id_(TypeFeedbackId::None()),
       positions_recorder_(this),
-      emit_debug_code_(FLAG_debug_code) {
+      emit_debug_code_(FLAG_debug_code),
+      predictable_code_size_(false) {
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
@@ -784,13 +785,14 @@ static bool fits_shifter(uint32_t imm32,
 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
 // space.  There is no guarantee that the relocated location can be similarly
 // encoded.
-bool Operand::must_use_constant_pool() const {
+bool Operand::must_use_constant_pool(const Assembler* assembler) const {
   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
 #ifdef DEBUG
     if (!Serializer::enabled()) {
       Serializer::TooLateToEnableNow();
     }
 #endif  // def DEBUG
+    if (assembler != NULL && assembler->predictable_code_size()) return true;
     return Serializer::enabled();
   } else if (rmode_ == RelocInfo::NONE) {
     return false;
@@ -799,16 +801,17 @@ bool Operand::must_use_constant_pool() const {
 }
 
 
-bool Operand::is_single_instruction(Instr instr) const {
+bool Operand::is_single_instruction(const Assembler* assembler,
+                                    Instr instr) const {
   if (rm_.is_valid()) return true;
   uint32_t dummy1, dummy2;
-  if (must_use_constant_pool() ||
+  if (must_use_constant_pool(assembler) ||
       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
     // The immediate operand cannot be encoded as a shifter operand, or use of
     // constant pool is required.  For a mov instruction not setting the
     // condition code additional instruction conventions can be used.
     if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-      if (must_use_constant_pool() ||
+      if (must_use_constant_pool(assembler) ||
           !CpuFeatures::IsSupported(ARMv7)) {
         // mov instruction will be an ldr from constant pool (one instruction).
         return true;
@@ -842,7 +845,7 @@ void Assembler::addrmod1(Instr instr,
     // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
-    if (x.must_use_constant_pool() ||
+    if (x.must_use_constant_pool(this) ||
         !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
       // The immediate operand cannot be encoded as a shifter operand, so load
       // it first to register ip and change the original instruction to use ip.
@@ -851,7 +854,7 @@ void Assembler::addrmod1(Instr instr,
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
       Condition cond = Instruction::ConditionField(instr);
       if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-        if (x.must_use_constant_pool() ||
+        if (x.must_use_constant_pool(this) ||
             !CpuFeatures::IsSupported(ARMv7)) {
           RecordRelocInfo(x.rmode_, x.imm32_);
           ldr(rd, MemOperand(pc, 0), cond);
@@ -863,7 +866,7 @@ void Assembler::addrmod1(Instr instr,
     } else {
       // If this is not a mov or mvn instruction we may still be able to avoid
       // a constant pool entry by using mvn or movw.
-      if (!x.must_use_constant_pool() &&
+      if (!x.must_use_constant_pool(this) &&
           (instr & kMovMvnMask) != kMovMvnPattern) {
         mov(ip, x, LeaveCC, cond);
       } else {
@@ -1388,7 +1391,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
     // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
-    if (src.must_use_constant_pool() ||
+    if (src.must_use_constant_pool(this) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
       // Immediate operand cannot be encoded, load it first to register ip.
       RecordRelocInfo(src.rmode_, src.imm32_);
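
Reading the new signatures together: call sites inside the Assembler pass `this`, so the predictable-code-size flag can veto single-instruction encodings, while callers with no assembler at hand (see the deoptimizer change below) pass NULL and get the pure CPU-feature answer. A condensed model of the patched predicate (illustrative only; RelocInfo and Serializer details are reduced to booleans):

```cpp
// Condensed model of Operand::must_use_constant_pool() after this patch.
// The real predicate inspects rmode_ and the Serializer; booleans stand in.
bool MustUseConstantPool(bool is_external_reference,
                         bool is_reloc_none,
                         bool serializer_enabled,
                         bool assembler_present,
                         bool predictable_code_size) {
  if (is_external_reference) {
    // New in this commit: force the constant pool whenever predictable
    // code size is requested, so the choice no longer depends on whether
    // the serializer happens to be running.
    if (assembler_present && predictable_code_size) return true;
    return serializer_enabled;
  } else if (is_reloc_none) {
    return false;
  }
  return true;
}
```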
......
@@ -424,8 +424,8 @@ class Operand BASE_EMBEDDED {
   // the instruction this operand is used for is a MOV or MVN instruction the
   // actual instruction to use is required for this calculation.  For other
   // instructions instr is ignored.
-  bool is_single_instruction(Instr instr = 0) const;
-  bool must_use_constant_pool() const;
+  bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
+  bool must_use_constant_pool(const Assembler* assembler) const;
 
   inline int32_t immediate() const {
     ASSERT(!rm_.is_valid());
@@ -648,8 +648,10 @@ class Assembler : public AssemblerBase {
   // Overrides the default provided by FLAG_debug_code.
   void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
 
-  // Dummy for cross platform compatibility.
-  void set_predictable_code_size(bool value) { }
+  // Avoids using instructions that vary in size in unpredictable ways between
+  // the snapshot and the running VM.  This is needed by the full compiler so
+  // that it can recompile code with debug support and fix the PC.
+  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
 
   // GetCode emits any pending (non-emitted) code and fills the descriptor
   // desc. GetCode() is idempotent; it returns the same result if no other
@@ -1170,6 +1172,8 @@ class Assembler : public AssemblerBase {
   // Jump unconditionally to given label.
   void jmp(Label* L) { b(L, al); }
 
+  bool predictable_code_size() const { return predictable_code_size_; }
+
   // Check the code size generated from label to here.
   int SizeOfCodeGeneratedSince(Label* label) {
     return pc_offset() - label->pos();
@@ -1450,7 +1454,10 @@ class Assembler : public AssemblerBase {
   friend class BlockConstPoolScope;
 
   PositionsRecorder positions_recorder_;
   bool emit_debug_code_;
+  bool predictable_code_size_;
 
   friend class PositionsRecorder;
   friend class EnsureSpace;
 };
......
@@ -1859,11 +1859,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   // we cannot call anything that could cause a GC from this stub.
-  // This stub uses VFP3 instructions.
-  CpuFeatures::Scope scope(VFP2);
-
   Label patch;
   const Register map = r9.is(tos_) ? r7 : r9;
+  const Register temp = map;
 
   // undefined -> false.
   CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
@@ -1916,6 +1914,10 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
     Label not_heap_number;
     __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
     __ b(ne, &not_heap_number);
+
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+
       __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
       __ VFPCompareAndSetFlags(d1, 0.0);
       // "tos_" is a register, and contains a non zero value by default.
@@ -1923,6 +1925,45 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
       // FP_ZERO or FP_NAN cases.  Otherwise, by default it returns true.
       __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);  // for FP_ZERO
       __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs);  // for FP_NAN
+    } else {
+      Label done, not_nan, not_zero;
+      __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
+      // -0 maps to false:
+      __ bic(
+          temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC);
+      __ b(ne, &not_zero);
+      // If exponent word is zero then the answer depends on the mantissa word.
+      __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
+      __ jmp(&done);
+
+      // Check for NaN.
+      __ bind(&not_zero);
+      // We already zeroed the sign bit, now shift out the mantissa so we only
+      // have the exponent left.
+      __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
+      unsigned int shifted_exponent_mask =
+          HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
+      __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
+      __ b(ne, &not_nan);  // If exponent is not 0x7ff then it can't be a NaN.
+
+      // Reload exponent word.
+      __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
+      __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
+      // If mantissa is not zero then we have a NaN, so return 0.
+      __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+      __ b(ne, &done);
+
+      // Load mantissa word.
+      __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
+      __ cmp(temp, Operand(0, RelocInfo::NONE));
+      // If mantissa is not zero then we have a NaN, so return 0.
+      __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+      __ b(ne, &done);
+
+      __ bind(&not_nan);
+      __ mov(tos_, Operand(1, RelocInfo::NONE));
+      __ bind(&done);
+    }
     __ Ret();
     __ bind(&not_heap_number);
   }
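
The new integer-only else branch classifies a heap number by inspecting the raw IEEE 754 words, exactly as the comments in the diff describe: clear the sign bit (so -0 behaves like +0), let an all-zero exponent word defer to the low mantissa word, and treat an all-ones exponent with any mantissa bit set as NaN. A host-side C++ model of that classification (a re-derivation for illustration, not V8 code; it assumes IEEE 754 binary64):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

// Returns the boolean value of a double under ToBoolean semantics:
// false iff the value is +0, -0, or NaN.
bool DoubleToBoolean(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));        // safe type-pun
  uint32_t exponent_word = static_cast<uint32_t>(bits >> 32);
  uint32_t mantissa_word = static_cast<uint32_t>(bits);
  exponent_word &= 0x7FFFFFFFu;                // clear sign: -0 == +0
  if (exponent_word == 0) {
    // High word all zero: +/-0 (false) or a denormal whose truth is
    // decided by the low mantissa word.
    return mantissa_word != 0;
  }
  if ((exponent_word >> 20) == 0x7FF) {
    // All-ones exponent: NaN if any mantissa bit is set, else +/-Inf.
    if ((exponent_word & 0x000FFFFFu) != 0 || mantissa_word != 0) {
      return false;                            // NaN -> false
    }
  }
  return true;                                 // everything else is true
}

int main() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  std::printf("%d %d %d %d\n", DoubleToBoolean(0.0), DoubleToBoolean(-0.0),
              DoubleToBoolean(nan), DoubleToBoolean(3.5));  // 0 0 0 1
  return 0;
}
```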
......
@@ -73,7 +73,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
     if (deopt_data->Pc(i)->value() == -1) continue;
     Address call_address = code_start_address + deopt_data->Pc(i)->value();
     Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
-    int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
+    // We need calls to have a predictable size in the unoptimized code, but
+    // this is optimized code, so we don't have to have a predictable size.
+    int call_size_in_bytes =
+        MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
                                                        RelocInfo::NONE);
     int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
     ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
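
The NULL assembler argument (visible in the macro-assembler hunk further down) is what lets CallSizeNotPredictableCodeSize stay static: with no assembler, must_use_constant_pool() cannot consult a predictable_code_size() flag, so the computed size reflects only the actual CPU features, which is exactly right when patching already-optimized code. A condensed model of the size computation (simplified; the real predicate is Operand::is_single_instruction()):

```cpp
#include <cstdio>

const int kInstrSize = 4;  // every ARM instruction is 4 bytes

// Condensed model of MacroAssembler::CallSize on ARM: a call is two
// instructions, plus one more when the target address cannot be
// materialized by a single mov-class instruction.
int CallSizeModel(bool target_is_single_instruction) {
  int size = 2 * kInstrSize;
  if (!target_is_single_instruction) size += kInstrSize;
  return size;
}

int main() {
  std::printf("%d %d\n", CallSizeModel(true), CallSizeModel(false));  // 8 12
  return 0;
}
```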
......
@@ -673,18 +673,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
                                Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  if (CpuFeatures::IsSupported(VFP2)) {
-    ToBooleanStub stub(result_register());
-    __ CallStub(&stub);
-    __ tst(result_register(), result_register());
-  } else {
-    // Call the runtime to find the boolean value of the source and then
-    // translate it into control flow to the pair of labels.
-    __ push(result_register());
-    __ CallRuntime(Runtime::kToBool, 1);
-    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-    __ cmp(r0, ip);
-  }
+  ToBooleanStub stub(result_register());
+  __ CallStub(&stub);
+  __ tst(result_register(), result_register());
   Split(ne, if_true, if_false, fall_through);
 }
......
@@ -137,7 +137,19 @@ int MacroAssembler::CallSize(
   int size = 2 * kInstrSize;
   Instr mov_instr = cond | MOV | LeaveCC;
   intptr_t immediate = reinterpret_cast<intptr_t>(target);
-  if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
+  if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
     size += kInstrSize;
   }
   return size;
+}
+
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(
+    Address target, RelocInfo::Mode rmode, Condition cond) {
+  int size = 2 * kInstrSize;
+  Instr mov_instr = cond | MOV | LeaveCC;
+  intptr_t immediate = reinterpret_cast<intptr_t>(target);
+  if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
+    size += kInstrSize;
+  }
+  return size;
@@ -276,12 +288,12 @@ void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
 
 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                          Condition cond) {
   if (!src2.is_reg() &&
-      !src2.must_use_constant_pool() &&
+      !src2.must_use_constant_pool(this) &&
       src2.immediate() == 0) {
     mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
-  } else if (!src2.is_single_instruction() &&
-             !src2.must_use_constant_pool() &&
+  } else if (!src2.is_single_instruction(this) &&
+             !src2.must_use_constant_pool(this) &&
              CpuFeatures::IsSupported(ARMv7) &&
              IsPowerOf2(src2.immediate() + 1)) {
     ubfx(dst, src1, 0,
@@ -296,7 +308,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                           Condition cond) {
   ASSERT(lsb < 32);
-  if (!CpuFeatures::IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     and_(dst, src1, Operand(mask), LeaveCC, cond);
     if (lsb != 0) {
@@ -311,7 +323,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                           Condition cond) {
   ASSERT(lsb < 32);
-  if (!CpuFeatures::IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     and_(dst, src1, Operand(mask), LeaveCC, cond);
     int shift_up = 32 - lsb - width;
@@ -339,7 +351,7 @@ void MacroAssembler::Bfi(Register dst,
   ASSERT(lsb + width < 32);
   ASSERT(!scratch.is(dst));
   if (width == 0) return;
-  if (!CpuFeatures::IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     bic(dst, dst, Operand(mask));
     and_(scratch, src, Operand((1 << width) - 1));
@@ -353,7 +365,7 @@ void MacroAssembler::Bfi(Register dst,
 
 void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
   ASSERT(lsb < 32);
-  if (!CpuFeatures::IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     bic(dst, dst, Operand(mask));
   } else {
@@ -364,7 +376,7 @@ void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
 
 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                           Condition cond) {
-  if (!CpuFeatures::IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     ASSERT(!dst.is(pc) && !src.rm().is(pc));
     ASSERT((satpos >= 0) && (satpos <= 31));
@@ -672,7 +684,7 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
   ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
 
   // Generate two ldr instructions if ldrd is not available.
-  if (CpuFeatures::IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
     CpuFeatures::Scope scope(ARMv7);
     ldrd(dst1, dst2, src, cond);
   } else {
@@ -714,7 +726,7 @@ void MacroAssembler::Strd(Register src1, Register src2,
   ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
 
   // Generate two str instructions if strd is not available.
-  if (CpuFeatures::IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
     CpuFeatures::Scope scope(ARMv7);
     strd(src1, src2, dst, cond);
   } else {
@@ -2586,7 +2598,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                          Register src,
                                          int num_least_bits) {
-  if (CpuFeatures::IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
     ubfx(dst, src, kSmiTagSize, num_least_bits);
   } else {
     mov(dst, Operand(src, ASR, kSmiTagSize));
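
All of the bit-field fallbacks above share one piece of arithmetic: the mask for a `width`-bit field at bit `lsb` is `(1 << (width + lsb)) - 1 - ((1 << lsb) - 1)`, that is, "all bits below the top of the field" minus "all bits below the field". A tiny host-side check of that identity (illustrative only):

```cpp
#include <cassert>
#include <cstdint>

// The mask built by the non-ARMv7 fallbacks, for a field of `width` bits
// starting at bit `lsb` (requires width + lsb < 32 to avoid a 32-bit shift).
uint32_t FieldMask(int lsb, int width) {
  return (1u << (width + lsb)) - 1 - ((1u << lsb) - 1);
}

int main() {
  assert(FieldMask(0, 4) == 0x0000000Fu);    // low nibble
  assert(FieldMask(4, 8) == 0x00000FF0u);    // 8 bits starting at bit 4
  assert(FieldMask(20, 11) == 0x7FF00000u);  // a double's exponent bits
  return 0;
}
```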
......
@@ -110,11 +110,12 @@ class MacroAssembler: public Assembler {
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
   static int CallSize(Register target, Condition cond = al);
   void Call(Register target, Condition cond = al);
-  static int CallSize(Address target,
+  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+  static int CallSizeNotPredictableCodeSize(Address target,
                                             RelocInfo::Mode rmode,
                                             Condition cond = al);
   void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
-  static int CallSize(Handle<Code> code,
+  int CallSize(Handle<Code> code,
                RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                Condition cond = al);
......
@@ -240,7 +240,8 @@ DEFINE_bool(interrupt_at_exit, false,
             "insert an interrupt check at function exit")
 DEFINE_bool(weighted_back_edges, false,
             "weight back edges by jump distance for interrupt triggering")
-DEFINE_int(interrupt_budget, 5900,
+// 0x1700 fits in the immediate field of an ARM instruction.
+DEFINE_int(interrupt_budget, 0x1700,
            "execution budget before interrupt is triggered")
 DEFINE_int(type_info_threshold, 15,
            "percentage of ICs that must have type info to allow optimization")
@@ -273,12 +274,12 @@ DEFINE_bool(enable_rdtsc, true,
             "enable use of RDTSC instruction if available")
 DEFINE_bool(enable_sahf, true,
             "enable use of SAHF instruction if available (X64 only)")
-DEFINE_bool(enable_vfp3, true,
+DEFINE_bool(enable_vfp3, false,
             "enable use of VFP3 instructions if available - this implies "
             "enabling ARMv7 and VFP2 instructions (ARM only)")
-DEFINE_bool(enable_vfp2, true,
+DEFINE_bool(enable_vfp2, false,
             "enable use of VFP2 instructions if available")
-DEFINE_bool(enable_armv7, true,
+DEFINE_bool(enable_armv7, false,
             "enable use of ARMv7 instructions if available (ARM only)")
 DEFINE_bool(enable_fpu, true,
             "enable use of MIPS FPU instructions if available (MIPS only)")
......