Commit 0e183035 authored by ager@chromium.org

ARM: Merging constants in simulator and assembler header files and other cleanup.

First stab at a general ARM cleanup patch. It merges ARM constants so that they can be used across simulator, assembler and disassembler, and tidies up some syntax and ambiguities.

BUG=none
TEST=none

Review URL: http://codereview.chromium.org/6274009

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6483 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 591eb3f4
This diff is collapsed.
@@ -41,6 +41,7 @@
#define V8_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include "assembler.h"
#include "constants-arm.h"
#include "serialize.h"
namespace v8 {
@@ -300,18 +301,6 @@ const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// VFP FPSCR constants.
static const uint32_t kVFPNConditionFlagBit = 1 << 31;
static const uint32_t kVFPZConditionFlagBit = 1 << 30;
static const uint32_t kVFPCConditionFlagBit = 1 << 29;
static const uint32_t kVFPVConditionFlagBit = 1 << 28;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
static const uint32_t kVFPRoundingModeMask = 3 << 22;
static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
static const uint32_t kVFPExceptionMask = 0xf;
// Coprocessor register
struct CRegister {
@@ -372,149 +361,6 @@ enum Coprocessor {
};
// Condition field in instructions.
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
eq = 0 << 28, // Z set equal.
ne = 1 << 28, // Z clear not equal.
nz = 1 << 28, // Z clear not zero.
cs = 2 << 28, // C set carry set.
hs = 2 << 28, // C set unsigned higher or same.
cc = 3 << 28, // C clear carry clear.
lo = 3 << 28, // C clear unsigned lower.
mi = 4 << 28, // N set negative.
pl = 5 << 28, // N clear positive or zero.
vs = 6 << 28, // V set overflow.
vc = 7 << 28, // V clear no overflow.
hi = 8 << 28, // C set, Z clear unsigned higher.
ls = 9 << 28, // C clear or Z set unsigned lower or same.
ge = 10 << 28, // N == V greater or equal.
lt = 11 << 28, // N != V less than.
gt = 12 << 28, // Z clear, N == V greater than.
le = 13 << 28, // Z set or N != V less than or equal.
al = 14 << 28 // always.
};
// Returns the equivalent of !cc.
inline Condition NegateCondition(Condition cc) {
ASSERT(cc != al);
return static_cast<Condition>(cc ^ ne);
}
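The XOR works because the 4-bit condition field in bits 31..28 places each condition next to its complement: eq/ne, cs/cc, mi/pl, vs/vc, hi/ls, ge/lt and gt/le differ only in the field's least significant bit, which is exactly the bit that ne (1 << 28) toggles. A minimal sketch, assuming the enum and function above are in scope:
// Sketch only: exercises NegateCondition against the encodings defined above.
#include <cassert>
int main() {
  assert(NegateCondition(eq) == ne);  // (0 ^ 1) << 28
  assert(NegateCondition(lt) == ge);  // (11 ^ 1) << 28 == 10 << 28
  assert(NegateCondition(hi) == ls);  // (8 ^ 1) << 28 == 9 << 28
  return 0;
}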
// Corresponds to transposing the operands of a comparison.
inline Condition ReverseCondition(Condition cc) {
switch (cc) {
case lo:
return hi;
case hi:
return lo;
case hs:
return ls;
case ls:
return hs;
case lt:
return gt;
case gt:
return lt;
case ge:
return le;
case le:
return ge;
default:
return cc;
};
}
// Branch hints are not used on the ARM. They are defined so that they can
// appear in shared function signatures, but will be ignored in ARM
// implementations.
enum Hint { no_hint };
// Hints are not used on the arm. Negating is trivial.
inline Hint NegateHint(Hint ignored) { return no_hint; }
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants
// Shifter operand shift operation
enum ShiftOp {
LSL = 0 << 5,
LSR = 1 << 5,
ASR = 2 << 5,
ROR = 3 << 5,
RRX = -1
};
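RRX is given the out-of-band value -1 because it has no shift-type encoding of its own in the ARM instruction set: it is ROR with an immediate shift amount of zero. A hedged sketch of how a decoder can tell them apart (bit positions are from the ARM data-processing operand format, not something this patch defines):
// Sketch: shift type lives in bits 6..5, immediate shift amount in bits 11..7.
bool IsRRX(uint32_t instr) {
  return ((instr >> 5) & 3) == 3 &&   // shift type field says ROR
         ((instr >> 7) & 31) == 0;    // immediate amount 0 makes it RRX
}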
// Condition code updating mode
enum SBit {
SetCC = 1 << 20, // set condition code
LeaveCC = 0 << 20 // leave condition code unchanged
};
// Status register selection
enum SRegister {
CPSR = 0 << 22,
SPSR = 1 << 22
};
// Status register fields
enum SRegisterField {
CPSR_c = CPSR | 1 << 16,
CPSR_x = CPSR | 1 << 17,
CPSR_s = CPSR | 1 << 18,
CPSR_f = CPSR | 1 << 19,
SPSR_c = SPSR | 1 << 16,
SPSR_x = SPSR | 1 << 17,
SPSR_s = SPSR | 1 << 18,
SPSR_f = SPSR | 1 << 19
};
// Status register field mask (or'ed SRegisterField enum values)
typedef uint32_t SRegisterFieldMask;
// Memory operand addressing mode
enum AddrMode {
// bit encoding P U W
Offset = (8|4|0) << 21, // offset (without writeback to base)
PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
};
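The (8|4|1) groupings spell out the P, U and W bits of the ARM load/store encoding; shifted left by 21 they land at bits 24, 23 and 21. The same three bits produce the BlockAddrMode values below. A short sketch, assuming the enum above:
// Sketch: the three addressing-mode bits, named as in the ARM ARM.
const uint32_t P = 8 << 21;  // bit 24: pre-indexed (1) vs post-indexed (0)
const uint32_t U = 4 << 21;  // bit 23: add (1) vs subtract (0) the offset
const uint32_t W = 1 << 21;  // bit 21: write the computed address back to base
// For example, PreIndex == (P | U | W) and NegPreIndex == (P | W).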
// Load/store multiple addressing mode
enum BlockAddrMode {
// bit encoding P U W
da = (0|0|0) << 21, // decrement after
ia = (0|4|0) << 21, // increment after
db = (8|0|0) << 21, // decrement before
ib = (8|4|0) << 21, // increment before
da_w = (0|0|1) << 21, // decrement after with writeback to base
ia_w = (0|4|1) << 21, // increment after with writeback to base
db_w = (8|0|1) << 21, // decrement before with writeback to base
ib_w = (8|4|1) << 21 // increment before with writeback to base
};
// Coprocessor load/store operand size
enum LFlag {
Long = 1 << 22, // long load/store coprocessor
Short = 0 << 22 // short load/store coprocessor
};
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -658,9 +504,6 @@ class CpuFeatures : public AllStatic {
};
typedef int32_t Instr;
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
@@ -680,15 +523,11 @@ extern const Instr kMovwLeaveCCFlip;
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
extern const Instr kALUMask;
extern const Instr kAddPattern;
extern const Instr kSubPattern;
extern const Instr kAndPattern;
extern const Instr kBicPattern;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
class Assembler : public Malloced {
public:
// Create an assembler. Instructions and relocation information are emitted
@@ -1001,7 +840,6 @@ class Assembler : public Malloced {
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
// Exception-generating instructions and debugging support
static const int kDefaultStopCode = -1;
void stop(const char* msg,
Condition cond = al,
int32_t code = kDefaultStopCode);
......
@@ -190,7 +190,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// Check whether an empty sized array is requested.
__ tst(array_size, array_size);
__ b(nz, &not_empty);
__ b(ne, &not_empty);
// If an empty array is requested allocate a small elements array anyway. This
// keeps the code below free of special casing for the empty array.
@@ -666,7 +666,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(r2, Operand(debug_step_in_fp));
__ ldr(r2, MemOperand(r2));
__ tst(r2, r2);
__ b(nz, &rt_call);
__ b(ne, &rt_call);
#endif
// Load the initial map and verify that it is in fact a map.
......
@@ -41,7 +41,7 @@ namespace internal {
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cc,
Condition cond,
bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
@@ -49,7 +49,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* lhs_not_nan,
Label* slow,
bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs);
@@ -544,7 +544,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cc,
Condition cond,
bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
@@ -553,31 +553,31 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
if (cc != eq || !never_nan_nan) {
if (cond != eq || !never_nan_nan) {
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
if (cc == lt || cc == gt) {
if (cond == lt || cond == gt) {
__ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
__ b(ge, slow);
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
if (cond != eq) {
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(ge, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
if (cc == le || cc == ge) {
if (cond == le || cond == ge) {
__ cmp(r4, Operand(ODDBALL_TYPE));
__ b(ne, &return_equal);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r2);
__ b(ne, &return_equal);
if (cc == le) {
if (cond == le) {
// undefined <= undefined should fail.
__ mov(r0, Operand(GREATER));
} else {
@@ -591,20 +591,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ bind(&return_equal);
if (cc == lt) {
if (cond == lt) {
__ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
} else if (cc == gt) {
} else if (cond == gt) {
__ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
} else {
__ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
}
__ Ret();
if (cc != eq || !never_nan_nan) {
if (cond != eq || !never_nan_nan) {
// For less and greater we don't have to check for NaN since the result of
// x < x is false regardless. For the others here is some code to check
// for NaN.
if (cc != lt && cc != gt) {
if (cond != lt && cond != gt) {
__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if it's
// not NaN.
@@ -628,10 +628,10 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// if all bits in mantissa are zero (it's an Infinity) and non-zero if
// not (it's a NaN). For <= and >= we need to load r0 with the failing
// value if it's a NaN.
if (cc != eq) {
if (cond != eq) {
// All-zero means Infinity means equal.
__ Ret(eq);
if (cc == le) {
if (cond == le) {
__ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
} else {
__ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
@@ -738,7 +738,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Register rhs_exponent = exp_first ? r0 : r1;
Register lhs_exponent = exp_first ? r2 : r3;
@@ -778,7 +778,7 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
__ bind(&one_is_nan);
// NaN comparisons always fail.
// Load whatever we need in r0 to make the comparison fail.
if (cc == lt || cc == le) {
if (cond == lt || cond == le) {
__ mov(r0, Operand(GREATER));
} else {
__ mov(r0, Operand(LESS));
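The GREATER/LESS choice follows from the stub's result convention noted elsewhere in this file: CompareStub returns <0, >0 or 0 in r0 and the caller re-applies the original condition to that value. Every ordered comparison against NaN must come out false, so the code loads whichever value fails the pending condition. A sketch of the rule (the helper is illustrative, not part of the patch):
// Sketch: pick the r0 value that makes `cond` fail when an operand is NaN.
// GREATER (+1) and LESS (-1) are V8's comparison-result constants.
int NaNComparisonResult(Condition cond) {
  return (cond == lt || cond == le) ? GREATER  // NaN <  x and NaN <= x: false
                                    : LESS;    // NaN >  x and NaN >= x: false
}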
@@ -790,7 +790,8 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
// See comment at call site.
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
Condition cond) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Register rhs_exponent = exp_first ? r0 : r1;
Register lhs_exponent = exp_first ? r2 : r3;
@@ -798,7 +799,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
Register lhs_mantissa = exp_first ? r3 : r2;
// r0, r1, r2, r3 have the two doubles. Neither is a NaN.
if (cc == eq) {
if (cond == eq) {
// Doubles are not equal unless they have the same bit pattern.
// Exception: 0 and -0.
__ cmp(rhs_mantissa, Operand(lhs_mantissa));
@@ -1087,7 +1088,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
} else if (FLAG_debug_code) {
__ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
__ Assert(nz, "CompareStub: unexpected smi operands.");
__ Assert(ne, "CompareStub: unexpected smi operands.");
}
// NOTICE! This code is only reached after a smi-fast-case check, so
@@ -3834,7 +3835,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ tst(regexp_data, Operand(kSmiTagMask));
__ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
__ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
__ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
}
@@ -3937,7 +3938,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Is first part a flat string?
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
__ b(nz, &runtime);
__ b(ne, &runtime);
__ bind(&seq_string);
// subject: Subject string
@@ -4385,13 +4386,13 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the first cons component is also non-flat, then go to runtime.
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask));
__ b(nz, &call_runtime_);
__ b(ne, &call_runtime_);
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
STATIC_ASSERT(kAsciiStringTag != 0);
__ tst(result_, Operand(kStringEncodingMask));
__ b(nz, &ascii_string);
__ b(ne, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register. We can
@@ -4476,7 +4477,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ tst(code_,
Operand(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
__ b(nz, &slow_case_);
__ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged ascii char code.
@@ -4923,7 +4924,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ add(hash, hash, Operand(hash, LSL, 15), SetCC);
// if (hash == 0) hash = 27;
__ mov(hash, Operand(27), LeaveCC, nz);
__ mov(hash, Operand(27), LeaveCC, ne);
}
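In C++ terms the two emitted instructions are the hash finalization step spelled out in the comments; the zero case is remapped, presumably so a computed hash is never zero. A sketch (hypothetical helper, not V8 API):
// Sketch of the masm sequence above.
uint32_t FinalizeStringHash(uint32_t hash) {
  hash += hash << 15;        // add(hash, hash, Operand(hash, LSL, 15), SetCC)
  if (hash == 0) hash = 27;  // the conditional mov(hash, Operand(27), ...)
  return hash;
}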
......
@@ -39,7 +39,7 @@ namespace internal {
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
#undef __
......
@@ -1589,7 +1589,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
}
void CodeGenerator::Comparison(Condition cc,
void CodeGenerator::Comparison(Condition cond,
Expression* left,
Expression* right,
bool strict) {
@@ -1603,7 +1603,7 @@ void CodeGenerator::Comparison(Condition cc,
// result : cc register
// Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == eq);
ASSERT(!strict || cond == eq);
Register lhs;
Register rhs;
@@ -1614,8 +1614,8 @@ void CodeGenerator::Comparison(Condition cc,
// We load the top two stack positions into registers chosen by the virtual
// frame. This should keep the register shuffling to a minimum.
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == gt || cc == le) {
cc = ReverseCondition(cc);
if (cond == gt || cond == le) {
cond = ReverseCondition(cond);
lhs_is_smi = frame_->KnownSmiAt(0);
rhs_is_smi = frame_->KnownSmiAt(1);
lhs = frame_->PopToRegister();
@@ -1655,7 +1655,7 @@ void CodeGenerator::Comparison(Condition cc,
// Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack.
CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
exit.Jump();
@@ -1667,7 +1667,7 @@ void CodeGenerator::Comparison(Condition cc,
__ cmp(lhs, Operand(rhs));
exit.Bind();
cc_reg_ = cc;
cc_reg_ = cond;
}
@@ -1885,8 +1885,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cc);
Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cond);
cc_reg_ = al;
}
@@ -5572,7 +5572,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
deferred->Branch(lt);
__ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
__ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
deferred->Branch(nz);
deferred->Branch(ne);
// Check the object's elements are in fast case and writable.
__ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
@@ -5589,7 +5589,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
__ mov(tmp2, index1);
__ orr(tmp2, tmp2, index2);
__ tst(tmp2, Operand(kSmiTagMask));
deferred->Branch(nz);
deferred->Branch(ne);
// Check that both indices are valid.
__ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
......
@@ -32,12 +32,10 @@
#include "constants-arm.h"
namespace assembler {
namespace arm {
namespace v8 {
namespace internal {
namespace v8i = v8::internal;
double Instr::DoubleImmedVmov() const {
double Instruction::DoubleImmedVmov() const {
// Reconstruct a double from the immediate encoded in the vmov instruction.
//
// instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
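Only the layout comment survives the truncation, so for orientation: abcd sits in instruction bits 19..16 and efgh in bits 3..0, and ARM's VFPExpandImm rule turns abcdefgh into sign a, exponent ~b:bbbbbbbb:cd and a 52-bit fraction of efgh followed by 48 zeros. A standalone sketch of that expansion, assumed from the ARM spec rather than copied from the elided body:
#include <cstdint>
#include <cstring>

// Sketch: expand the 8-bit vmov immediate into an IEEE-754 double.
double ExpandVmovImmediate(uint32_t instr) {
  uint64_t a    = (instr >> 19) & 1;  // sign bit
  uint64_t b    = (instr >> 18) & 1;
  uint64_t cd   = (instr >> 16) & 3;
  uint64_t efgh = instr & 0xF;
  uint64_t exp  = ((b ^ 1) << 10) | ((b * 0xFF) << 2) | cd;  // ~b:bbbbbbbb:cd
  uint64_t bits = (a << 63) | (exp << 52) | (efgh << 48);
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}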
@@ -149,6 +147,6 @@ int Registers::Number(const char* name) {
}
} } // namespace assembler::arm
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
This diff is collapsed.
@@ -56,7 +56,7 @@ void CPU::FlushICache(void* start, size_t size) {
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
assembler::arm::Simulator::FlushICache(start, size);
Simulator::FlushICache(start, size);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
......
This diff is collapsed.
@@ -517,16 +517,16 @@ void FullCodeGenerator::DoTest(Label* if_true,
}
void FullCodeGenerator::Split(Condition cc,
void FullCodeGenerator::Split(Condition cond,
Label* if_true,
Label* if_false,
Label* fall_through) {
if (if_false == fall_through) {
__ b(cc, if_true);
__ b(cond, if_true);
} else if (if_true == fall_through) {
__ b(NegateCondition(cc), if_false);
__ b(NegateCondition(cond), if_false);
} else {
__ b(cc, if_true);
__ b(cond, if_true);
__ b(if_false);
}
}
@@ -3461,34 +3461,34 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = eq;
Condition cond = eq;
bool strict = false;
switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through
case Token::EQ:
cc = eq;
cond = eq;
__ pop(r1);
break;
case Token::LT:
cc = lt;
cond = lt;
__ pop(r1);
break;
case Token::GT:
// Reverse left and right sides to obtain ECMA-262 conversion order.
cc = lt;
cond = lt;
__ mov(r1, result_register());
__ pop(r0);
break;
case Token::LTE:
// Reverse left and right sides to obtain ECMA-262 conversion order.
cc = ge;
cond = ge;
__ mov(r1, result_register());
__ pop(r0);
break;
case Token::GTE:
cc = ge;
cond = ge;
__ pop(r1);
break;
case Token::IN:
@@ -3503,17 +3503,17 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ orr(r2, r0, Operand(r1));
__ JumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
Split(cc, if_true, if_false, NULL);
Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
CompareFlags flags = inline_smi_code
? NO_SMI_COMPARE_IN_STUB
: NO_COMPARE_FLAGS;
CompareStub stub(cc, strict, flags, r1, r0);
CompareStub stub(cond, strict, flags, r1, r0);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0, RelocInfo::NONE));
Split(cc, if_true, if_false, fall_through);
Split(cond, if_true, if_false, fall_through);
}
}
......
@@ -95,13 +95,13 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
__ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
__ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor)));
__ b(nz, miss);
__ b(ne, miss);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(t1, ip);
__ b(nz, miss);
__ b(ne, miss);
}
@@ -427,7 +427,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(scratch,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ b(nz, slow);
__ b(ne, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
@@ -1674,7 +1674,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
return no_condition;
return kNoCondition;
}
}
......
@@ -76,7 +76,7 @@ void JumpTarget::DoJump() {
}
void JumpTarget::DoBranch(Condition cc, Hint ignored) {
void JumpTarget::DoBranch(Condition cond, Hint ignored) {
ASSERT(cgen()->has_valid_frame());
if (entry_frame_set_) {
@@ -86,7 +86,7 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
}
// We have an expected frame to merge to on the backward edge.
cgen()->frame()->MergeTo(&entry_frame_, cc);
cgen()->frame()->MergeTo(&entry_frame_, cond);
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
@@ -98,8 +98,8 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
// frame with less precise type info branches to them.
ASSERT(direction_ != FORWARD_ONLY);
}
__ b(cc, &entry_label_);
if (cc == al) {
__ b(cond, &entry_label_);
if (cond == al) {
cgen()->DeleteFrame();
}
}
......
@@ -661,7 +661,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
return;
}
if (cc == no_condition) {
if (cc == kNoCondition) {
if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
@@ -1216,7 +1216,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ b(ne, &done);
if (instr->InputAt(1)->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) {
DeoptimizeIf(no_condition, instr->environment());
DeoptimizeIf(kNoCondition, instr->environment());
}
} else {
// Test the non-zero operand for negative sign.
@@ -1483,7 +1483,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(0));
EmitBranch(true_block, false_block, nz);
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
Register scratch = scratch0();
@@ -1541,7 +1541,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CallStub(&stub);
__ cmp(reg, Operand(0));
__ ldm(ia_w, sp, saved_regs);
EmitBranch(true_block, false_block, nz);
EmitBranch(true_block, false_block, ne);
}
}
}
@@ -1593,7 +1593,7 @@ void LCodeGen::DoGoto(LGoto* instr) {
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
Condition cond = no_condition;
Condition cond = kNoCondition;
switch (op) {
case Token::EQ:
case Token::EQ_STRICT:
@@ -2136,7 +2136,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
return no_condition;
return kNoCondition;
}
}
@@ -3556,7 +3556,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
if (type_name->Equals(Heap::number_symbol())) {
__ tst(input, Operand(kSmiTagMask));
@@ -3641,7 +3641,7 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
DeoptimizeIf(no_condition, instr->environment());
DeoptimizeIf(kNoCondition, instr->environment());
}
......
@@ -318,7 +318,7 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
CheckConstPool(true, true);
add(pc, pc, Operand(index,
LSL,
assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
Instruction::kInstrSizeLog2 - kSmiTagSize));
BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
nop(); // Jump table alignment.
for (int i = 0; i < targets.length(); i++) {
@@ -369,12 +369,12 @@ void MacroAssembler::RecordWriteHelper(Register object,
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
Condition cond,
Label* branch) {
ASSERT(cc == eq || cc == ne);
ASSERT(cond == eq || cond == ne);
and_(scratch, object, Operand(ExternalReference::new_space_mask()));
cmp(scratch, Operand(ExternalReference::new_space_start()));
b(cc, branch);
b(cond, branch);
}
@@ -926,7 +926,7 @@ void MacroAssembler::IsObjectJSStringType(Register object,
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
tst(scratch, Operand(kIsNotStringMask));
b(nz, fail);
b(ne, fail);
}
@@ -1806,9 +1806,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void MacroAssembler::Assert(Condition cc, const char* msg) {
void MacroAssembler::Assert(Condition cond, const char* msg) {
if (FLAG_debug_code)
Check(cc, msg);
Check(cond, msg);
}
@@ -1841,9 +1841,9 @@ void MacroAssembler::AssertFastElements(Register elements) {
}
void MacroAssembler::Check(Condition cc, const char* msg) {
void MacroAssembler::Check(Condition cond, const char* msg) {
Label L;
b(cc, &L);
b(cond, &L);
Abort(msg);
// will not return here
bind(&L);
......
@@ -139,7 +139,7 @@ class MacroAssembler: public Assembler {
// scratch can be object itself, but it will be clobbered.
void InNewSpace(Register object,
Register scratch,
Condition cc, // eq for new space, ne otherwise
Condition cond, // eq for new space, ne otherwise
Label* branch);
@@ -685,14 +685,14 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugging
// Calls Abort(msg) if the condition cc is not satisfied.
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
void Assert(Condition cond, const char* msg);
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
void Check(Condition cond, const char* msg);
// Print a message to stdout and abort execution.
void Abort(const char* msg);
......
This diff is collapsed.
@@ -80,8 +80,8 @@ class SimulatorStack : public v8::internal::AllStatic {
#include "constants-arm.h"
#include "hashmap.h"
namespace assembler {
namespace arm {
namespace v8 {
namespace internal {
class CachePage {
public:
@@ -203,11 +203,11 @@ class Simulator {
};
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instr* instr, const char* format);
void Format(Instruction* instr, const char* format);
// Checks if the current instruction should be executed based on its
// condition bits.
bool ConditionallyExecute(Instr* instr);
bool ConditionallyExecute(Instruction* instr);
// Helper functions to set the conditional flags in the architecture state.
void SetNZFlags(int32_t val);
@@ -225,13 +225,13 @@ class Simulator {
void Copy_FPSCR_to_APSR();
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instr* instr, bool* carry_out);
int32_t GetImm(Instr* instr, bool* carry_out);
void HandleRList(Instr* instr, bool load);
void SoftwareInterrupt(Instr* instr);
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
void HandleRList(Instruction* instr, bool load);
void SoftwareInterrupt(Instruction* instr);
// Stop helper functions.
inline bool isStopInstruction(Instr* instr);
inline bool isStopInstruction(Instruction* instr);
inline bool isWatchedStop(uint32_t bkpt_code);
inline bool isEnabledStop(uint32_t bkpt_code);
inline void EnableStop(uint32_t bkpt_code);
@@ -245,41 +245,42 @@ class Simulator {
inline void WriteB(int32_t addr, uint8_t value);
inline void WriteB(int32_t addr, int8_t value);
inline uint16_t ReadHU(int32_t addr, Instr* instr);
inline int16_t ReadH(int32_t addr, Instr* instr);
inline uint16_t ReadHU(int32_t addr, Instruction* instr);
inline int16_t ReadH(int32_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
inline void WriteH(int32_t addr, uint16_t value, Instr* instr);
inline void WriteH(int32_t addr, int16_t value, Instr* instr);
inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
inline int ReadW(int32_t addr, Instr* instr);
inline void WriteW(int32_t addr, int value, Instr* instr);
inline int ReadW(int32_t addr, Instruction* instr);
inline void WriteW(int32_t addr, int value, Instruction* instr);
int32_t* ReadDW(int32_t addr);
void WriteDW(int32_t addr, int32_t value1, int32_t value2);
// Executing is handled based on the instruction type.
void DecodeType01(Instr* instr); // both type 0 and type 1 rolled into one
void DecodeType2(Instr* instr);
void DecodeType3(Instr* instr);
void DecodeType4(Instr* instr);
void DecodeType5(Instr* instr);
void DecodeType6(Instr* instr);
void DecodeType7(Instr* instr);
// Both type 0 and type 1 rolled into one.
void DecodeType01(Instruction* instr);
void DecodeType2(Instruction* instr);
void DecodeType3(Instruction* instr);
void DecodeType4(Instruction* instr);
void DecodeType5(Instruction* instr);
void DecodeType6(Instruction* instr);
void DecodeType7(Instruction* instr);
// Support for VFP.
void DecodeTypeVFP(Instr* instr);
void DecodeType6CoprocessorIns(Instr* instr);
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
void DecodeVCMP(Instr* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
// Executes one instruction.
void InstructionDecode(Instr* instr);
void InstructionDecode(Instruction* instr);
// ICache.
static void CheckICache(Instr* instr);
static void CheckICache(Instruction* instr);
static void FlushOnePage(intptr_t start, int size);
static CachePage* GetCachePage(void* page);
@@ -330,8 +331,8 @@ class Simulator {
static v8::internal::HashMap* i_cache_;
// Registered breakpoints.
Instr* break_pc_;
instr_t break_instr_;
Instruction* break_pc_;
Instr break_instr_;
// A stop is watched if its code is less than kNumOfWatchedStops.
// Only watched stops support enabling/disabling and the counter feature.
@@ -344,27 +345,22 @@ class Simulator {
// instruction, if bit 31 of watched_stops[code].count is unset.
// The value watched_stops[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or gone through.
struct StopCoundAndDesc {
struct StopCountAndDesc {
uint32_t count;
char* desc;
};
StopCoundAndDesc watched_stops[kNumOfWatchedStops];
StopCountAndDesc watched_stops[kNumOfWatchedStops];
};
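A sketch of the bit-31 convention the comment above describes (constant and helper names are illustrative; the real interface is the isWatchedStop/isEnabledStop family declared further up):
// Sketch: bit 31 of count is the "disabled" flag; the low 31 bits count hits.
static const uint32_t kStopDisabledBit = 1u << 31;

bool StopIsEnabled(const StopCountAndDesc& s) {
  return (s.count & kStopDisabledBit) == 0;  // bit 31 unset => enabled
}
uint32_t StopHitCount(const StopCountAndDesc& s) {
  return s.count & ~kStopDisabledBit;
}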
} } // namespace assembler::arm
namespace v8 {
namespace internal {
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(assembler::arm::Simulator::current()->Call( \
reinterpret_cast<Object*>(Simulator::current()->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::arm::Simulator::current()->Call( \
Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@@ -380,16 +376,16 @@ namespace internal {
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return assembler::arm::Simulator::current()->StackLimit();
return Simulator::current()->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
assembler::arm::Simulator* sim = assembler::arm::Simulator::current();
Simulator* sim = Simulator::current();
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
assembler::arm::Simulator::current()->PopAddress();
Simulator::current()->PopAddress();
}
};
......
@@ -72,7 +72,7 @@ void ThreadLocalTop::Initialize() {
handler_ = 0;
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
simulator_ = assembler::arm::Simulator::current();
simulator_ = Simulator::current();
#elif V8_TARGET_ARCH_MIPS
simulator_ = assembler::mips::Simulator::current();
#endif
@@ -1095,7 +1095,7 @@ char* Top::RestoreThread(char* from) {
// thread_local_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
thread_local_.simulator_ = assembler::arm::Simulator::current();
thread_local_.simulator_ = Simulator::current();
#elif V8_TARGET_ARCH_MIPS
thread_local_.simulator_ = assembler::mips::Simulator::current();
#endif
......
@@ -109,7 +109,7 @@ class ThreadLocalTop BASE_EMBEDDED {
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
assembler::arm::Simulator* simulator_;
Simulator* simulator_;
#elif V8_TARGET_ARCH_MIPS
assembler::mips::Simulator* simulator_;
#endif
......
@@ -79,7 +79,7 @@ bool V8::Initialize(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if defined(V8_TARGET_ARCH_ARM)
::assembler::arm::Simulator::Initialize();
Simulator::Initialize();
#elif defined(V8_TARGET_ARCH_MIPS)
::assembler::mips::Simulator::Initialize();
#endif
......