Commit 0e183035 authored by ager@chromium.org

ARM: Merging constants in simulator and assembler header files and other cleanup.

First stab at a general ARM cleanup patch. It merges ARM constants so that they can be used across simulator, assembler and disassembler, and tidies up some syntax and ambiguities.
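The point of the merge is that the assembler (which encodes instructions) and the simulator/disassembler (which decode them) previously kept separate copies of the same bit-level constants. A minimal sketch of the shared-header idea, with illustrative names rather than the real V8 declarations:

```cpp
// Sketch only, not V8 source: one definition of the instruction constants,
// consumed by both the encoding and the decoding side.
#include <cassert>
#include <cstdint>
#include <cstdio>

// --- what would live in a shared constants-arm.h ---
enum Condition : uint32_t {
  eq = 0u << 28,   // Z set
  ne = 1u << 28,   // Z clear
  al = 14u << 28   // always
};

// --- encoding side (assembler) ---
uint32_t EncodeBranch(Condition cond, uint32_t offset_field) {
  // cond in bits 31:28, opcode bits 27:25 = 101, L bit clear -> B<cond>
  return static_cast<uint32_t>(cond) | (5u << 25) | offset_field;
}

// --- decoding side (disassembler/simulator) ---
Condition DecodeCondition(uint32_t instr) {
  return static_cast<Condition>(instr & (0xfu << 28));
}

int main() {
  uint32_t instr = EncodeBranch(ne, 42);
  assert(DecodeCondition(instr) == ne);  // both sides agree on the encoding
  puts("encoder and decoder share one Condition definition");
}
```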

BUG=none
TEST=none

Review URL: http://codereview.chromium.org/6274009

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6483 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 591eb3f4
This diff is collapsed.
@@ -41,6 +41,7 @@
 #define V8_ARM_ASSEMBLER_ARM_H_
 #include <stdio.h>
 #include "assembler.h"
+#include "constants-arm.h"
 #include "serialize.h"
 namespace v8 {
@@ -300,18 +301,6 @@ const DwVfpRegister d13 = { 13 };
 const DwVfpRegister d14 = { 14 };
 const DwVfpRegister d15 = { 15 };
-// VFP FPSCR constants.
-static const uint32_t kVFPNConditionFlagBit = 1 << 31;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPCConditionFlagBit = 1 << 29;
-static const uint32_t kVFPVConditionFlagBit = 1 << 28;
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
-static const uint32_t kVFPExceptionMask = 0xf;
 // Coprocessor register
 struct CRegister {
@@ -372,149 +361,6 @@ enum Coprocessor {
 };
-// Condition field in instructions.
-enum Condition {
-  // any value < 0 is considered no_condition
-  no_condition = -1,
-  eq =  0 << 28,  // Z set            equal.
-  ne =  1 << 28,  // Z clear          not equal.
-  nz =  1 << 28,  // Z clear          not zero.
-  cs =  2 << 28,  // C set            carry set.
-  hs =  2 << 28,  // C set            unsigned higher or same.
-  cc =  3 << 28,  // C clear          carry clear.
-  lo =  3 << 28,  // C clear          unsigned lower.
-  mi =  4 << 28,  // N set            negative.
-  pl =  5 << 28,  // N clear          positive or zero.
-  vs =  6 << 28,  // V set            overflow.
-  vc =  7 << 28,  // V clear          no overflow.
-  hi =  8 << 28,  // C set, Z clear   unsigned higher.
-  ls =  9 << 28,  // C clear or Z set unsigned lower or same.
-  ge = 10 << 28,  // N == V           greater or equal.
-  lt = 11 << 28,  // N != V           less than.
-  gt = 12 << 28,  // Z clear, N == V  greater than.
-  le = 13 << 28,  // Z set or N != V  less then or equal
-  al = 14 << 28   //                  always.
-};
-// Returns the equivalent of !cc.
-inline Condition NegateCondition(Condition cc) {
-  ASSERT(cc != al);
-  return static_cast<Condition>(cc ^ ne);
-}
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
-  switch (cc) {
-    case lo:
-      return hi;
-    case hi:
-      return lo;
-    case hs:
-      return ls;
-    case ls:
-      return hs;
-    case lt:
-      return gt;
-    case gt:
-      return lt;
-    case ge:
-      return le;
-    case le:
-      return ge;
-    default:
-      return cc;
-  };
-}
-// Branch hints are not used on the ARM. They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-// Hints are not used on the arm. Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants
-// Shifter operand shift operation
-enum ShiftOp {
-  LSL = 0 << 5,
-  LSR = 1 << 5,
-  ASR = 2 << 5,
-  ROR = 3 << 5,
-  RRX = -1
-};
-// Condition code updating mode
-enum SBit {
-  SetCC = 1 << 20,    // set condition code
-  LeaveCC = 0 << 20   // leave condition code unchanged
-};
-// Status register selection
-enum SRegister {
-  CPSR = 0 << 22,
-  SPSR = 1 << 22
-};
-// Status register fields
-enum SRegisterField {
-  CPSR_c = CPSR | 1 << 16,
-  CPSR_x = CPSR | 1 << 17,
-  CPSR_s = CPSR | 1 << 18,
-  CPSR_f = CPSR | 1 << 19,
-  SPSR_c = SPSR | 1 << 16,
-  SPSR_x = SPSR | 1 << 17,
-  SPSR_s = SPSR | 1 << 18,
-  SPSR_f = SPSR | 1 << 19
-};
-// Status register field mask (or'ed SRegisterField enum values)
-typedef uint32_t SRegisterFieldMask;
-// Memory operand addressing mode
-enum AddrMode {
-  // bit encoding P U W
-  Offset       = (8|4|0) << 21,  // offset (without writeback to base)
-  PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
-  PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
-  NegOffset    = (8|0|0) << 21,  // negative offset (without writeback to base)
-  NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
-  NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
-};
-// Load/store multiple addressing mode
-enum BlockAddrMode {
-  // bit encoding P U W
-  da   = (0|0|0) << 21,  // decrement after
-  ia   = (0|4|0) << 21,  // increment after
-  db   = (8|0|0) << 21,  // decrement before
-  ib   = (8|4|0) << 21,  // increment before
-  da_w = (0|0|1) << 21,  // decrement after with writeback to base
-  ia_w = (0|4|1) << 21,  // increment after with writeback to base
-  db_w = (8|0|1) << 21,  // decrement before with writeback to base
-  ib_w = (8|4|1) << 21   // increment before with writeback to base
-};
-// Coprocessor load/store operand size
-enum LFlag {
-  Long  = 1 << 22,  // long load/store coprocessor
-  Short = 0 << 22   // short load/store coprocessor
-};
 // -----------------------------------------------------------------------------
 // Machine instruction Operands
@@ -658,9 +504,6 @@ class CpuFeatures : public AllStatic {
 };
-typedef int32_t Instr;
 extern const Instr kMovLrPc;
 extern const Instr kLdrPCMask;
 extern const Instr kLdrPCPattern;
@@ -680,15 +523,11 @@ extern const Instr kMovwLeaveCCFlip;
 extern const Instr kCmpCmnMask;
 extern const Instr kCmpCmnPattern;
 extern const Instr kCmpCmnFlip;
-extern const Instr kALUMask;
-extern const Instr kAddPattern;
-extern const Instr kSubPattern;
-extern const Instr kAndPattern;
-extern const Instr kBicPattern;
 extern const Instr kAddSubFlip;
 extern const Instr kAndBicFlip;
 class Assembler : public Malloced {
  public:
   // Create an assembler. Instructions and relocation information are emitted
@@ -1001,7 +840,6 @@
   void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
   // Exception-generating instructions and debugging support
-  static const int kDefaultStopCode = -1;
   void stop(const char* msg,
             Condition cond = al,
             int32_t code = kDefaultStopCode);
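The Condition enum removed above (it now lives in constants-arm.h) places the condition code in bits 31:28 and defines several aliases: nz is the same value as ne, and hs/lo the same as cs/cc. Two consequences are relied on throughout this patch: the bulk rename of nz to ne cannot change any generated code, and NegateCondition can negate by XOR-ing with ne because complementary conditions differ only in the lowest bit of the 4-bit field. A standalone check of both properties, mirroring the removed enum (illustrative, no V8 headers required):

```cpp
#include <cassert>

enum Condition : unsigned {
  eq = 0u << 28, ne = 1u << 28, nz = 1u << 28,
  cs = 2u << 28, hs = 2u << 28, cc = 3u << 28, lo = 3u << 28,
  mi = 4u << 28, pl = 5u << 28, vs = 6u << 28, vc = 7u << 28,
  hi = 8u << 28, ls = 9u << 28, ge = 10u << 28, lt = 11u << 28,
  gt = 12u << 28, le = 13u << 28, al = 14u << 28
};

// Same trick as NegateCondition: complementary conditions differ only in
// the lowest bit of the 4-bit condition field, so XOR with ne flips it.
Condition Negate(Condition c) { return static_cast<Condition>(c ^ ne); }

int main() {
  static_assert(nz == ne, "the nz -> ne rename is purely cosmetic");
  static_assert(hs == cs && lo == cc, "hs/lo alias cs/cc the same way");
  assert(Negate(eq) == ne && Negate(lt) == ge && Negate(hi) == ls);
}
```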
......
@@ -190,7 +190,7 @@ static void AllocateJSArray(MacroAssembler* masm,
   // Check whether an empty sized array is requested.
   __ tst(array_size, array_size);
-  __ b(nz, &not_empty);
+  __ b(ne, &not_empty);
   // If an empty array is requested allocate a small elements array anyway. This
   // keeps the code below free of special casing for the empty array.
@@ -666,7 +666,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
   __ mov(r2, Operand(debug_step_in_fp));
   __ ldr(r2, MemOperand(r2));
   __ tst(r2, r2);
-  __ b(nz, &rt_call);
+  __ b(ne, &rt_call);
 #endif
   // Load the initial map and verify that it is in fact a map.
......
@@ -41,7 +41,7 @@ namespace internal {
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
-                                          Condition cc,
+                                          Condition cond,
                                           bool never_nan_nan);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Register lhs,
@@ -49,7 +49,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Label* lhs_not_nan,
                                     Label* slow,
                                     bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                            Register lhs,
                                            Register rhs);
@@ -544,7 +544,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
 // for "identity and not NaN".
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
-                                          Condition cc,
+                                          Condition cond,
                                           bool never_nan_nan) {
   Label not_identical;
   Label heap_number, return_equal;
@@ -553,31 +553,31 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
   // The two objects are identical. If we know that one of them isn't NaN then
   // we now know they test equal.
-  if (cc != eq || !never_nan_nan) {
+  if (cond != eq || !never_nan_nan) {
     // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
     // so we do the second best thing - test it ourselves.
     // They are both equal and they are not both Smis so both of them are not
    // Smis. If it's not a heap number, then return equal.
-    if (cc == lt || cc == gt) {
+    if (cond == lt || cond == gt) {
       __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
       __ b(ge, slow);
     } else {
       __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
       __ b(eq, &heap_number);
       // Comparing JS objects with <=, >= is complicated.
-      if (cc != eq) {
+      if (cond != eq) {
         __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
         __ b(ge, slow);
         // Normally here we fall through to return_equal, but undefined is
         // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false! See ECMAScript 11.8.5.
-        if (cc == le || cc == ge) {
+        if (cond == le || cond == ge) {
          __ cmp(r4, Operand(ODDBALL_TYPE));
          __ b(ne, &return_equal);
          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
          __ cmp(r0, r2);
          __ b(ne, &return_equal);
-          if (cc == le) {
+          if (cond == le) {
            // undefined <= undefined should fail.
            __ mov(r0, Operand(GREATER));
          } else {
@@ -591,20 +591,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
   }
   __ bind(&return_equal);
-  if (cc == lt) {
+  if (cond == lt) {
     __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
-  } else if (cc == gt) {
+  } else if (cond == gt) {
     __ mov(r0, Operand(LESS));  // Things aren't greater than themselves.
   } else {
     __ mov(r0, Operand(EQUAL));  // Things are <=, >=, ==, === themselves.
   }
   __ Ret();
-  if (cc != eq || !never_nan_nan) {
+  if (cond != eq || !never_nan_nan) {
     // For less and greater we don't have to check for NaN since the result of
     // x < x is false regardless. For the others here is some code to check
     // for NaN.
-    if (cc != lt && cc != gt) {
+    if (cond != lt && cond != gt) {
       __ bind(&heap_number);
       // It is a heap number, so return non-equal if it's NaN and equal if it's
       // not NaN.
@@ -628,10 +628,10 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
       // if all bits in mantissa are zero (it's an Infinity) and non-zero if
       // not (it's a NaN). For <= and >= we need to load r0 with the failing
       // value if it's a NaN.
-      if (cc != eq) {
+      if (cond != eq) {
        // All-zero means Infinity means equal.
        __ Ret(eq);
-        if (cc == le) {
+        if (cond == le) {
          __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ mov(r0, Operand(LESS));  // NaN >= NaN should fail.
@@ -738,7 +738,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
 }
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
+void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
   bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
   Register rhs_exponent = exp_first ? r0 : r1;
   Register lhs_exponent = exp_first ? r2 : r3;
@@ -778,7 +778,7 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
   __ bind(&one_is_nan);
   // NaN comparisons always fail.
   // Load whatever we need in r0 to make the comparison fail.
-  if (cc == lt || cc == le) {
+  if (cond == lt || cond == le) {
     __ mov(r0, Operand(GREATER));
   } else {
     __ mov(r0, Operand(LESS));
@@ -790,7 +790,8 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
 // See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
+                                          Condition cond) {
   bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
   Register rhs_exponent = exp_first ? r0 : r1;
   Register lhs_exponent = exp_first ? r2 : r3;
@@ -798,7 +799,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
   Register lhs_mantissa = exp_first ? r3 : r2;
   // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
-  if (cc == eq) {
+  if (cond == eq) {
     // Doubles are not equal unless they have the same bit pattern.
     // Exception: 0 and -0.
     __ cmp(rhs_mantissa, Operand(lhs_mantissa));
@@ -1087,7 +1088,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
   } else if (FLAG_debug_code) {
     __ orr(r2, r1, r0);
     __ tst(r2, Operand(kSmiTagMask));
-    __ Assert(nz, "CompareStub: unexpected smi operands.");
+    __ Assert(ne, "CompareStub: unexpected smi operands.");
   }
   // NOTICE! This code is only reached after a smi-fast-case check, so
@@ -3834,7 +3835,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
   if (FLAG_debug_code) {
     __ tst(regexp_data, Operand(kSmiTagMask));
-    __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
+    __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
     __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
     __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
   }
@@ -3937,7 +3938,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Is first part a flat string?
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r0, Operand(kStringRepresentationMask));
-  __ b(nz, &runtime);
+  __ b(ne, &runtime);
   __ bind(&seq_string);
   // subject: Subject string
@@ -4385,13 +4386,13 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   // If the first cons component is also non-flat, then go to runtime.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(result_, Operand(kStringRepresentationMask));
-  __ b(nz, &call_runtime_);
+  __ b(ne, &call_runtime_);
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
   STATIC_ASSERT(kAsciiStringTag != 0);
   __ tst(result_, Operand(kStringEncodingMask));
-  __ b(nz, &ascii_string);
+  __ b(ne, &ascii_string);
   // 2-byte string.
   // Load the 2-byte character code into the result register. We can
@@ -4476,7 +4477,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   __ tst(code_,
          Operand(kSmiTagMask |
                  ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
-  __ b(nz, &slow_case_);
+  __ b(ne, &slow_case_);
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   // At this point code register contains smi tagged ascii char code.
@@ -4923,7 +4924,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
   __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
   // if (hash == 0) hash = 27;
-  __ mov(hash, Operand(27), LeaveCC, nz);
+  __ mov(hash, Operand(27), LeaveCC, ne);
 }
......
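For reference, the computation the last hunk above finalizes, in plain C++. Only the `hash += hash << 15` step and the zero check appear in the hunk; the two earlier mixing steps are an assumption based on the Jenkins one-at-a-time scheme V8's string hashing followed at the time:

```cpp
#include <cstdint>

uint32_t FinalizeStringHash(uint32_t hash) {
  hash += hash << 3;         // assumed earlier mixing step
  hash ^= hash >> 11;        // assumed earlier mixing step
  hash += hash << 15;        // add(hash, hash, Operand(hash, LSL, 15), SetCC)
  if (hash == 0) hash = 27;  // per the "// if (hash == 0) hash = 27;" comment:
  return hash;               // reserve 0 to mean "hash not yet computed"
}
```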
@@ -39,7 +39,7 @@ namespace internal {
 // Platform-specific inline functions.
 void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
 #undef __
......
@@ -1589,7 +1589,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
 }
-void CodeGenerator::Comparison(Condition cc,
+void CodeGenerator::Comparison(Condition cond,
                                Expression* left,
                                Expression* right,
                                bool strict) {
@@ -1603,7 +1603,7 @@ void CodeGenerator::Comparison(Condition cc,
   // result : cc register
   // Strict only makes sense for equality comparisons.
-  ASSERT(!strict || cc == eq);
+  ASSERT(!strict || cond == eq);
   Register lhs;
   Register rhs;
@@ -1614,8 +1614,8 @@ void CodeGenerator::Comparison(Condition cc,
   // We load the top two stack positions into registers chosen by the virtual
   // frame. This should keep the register shuffling to a minimum.
   // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
-  if (cc == gt || cc == le) {
-    cc = ReverseCondition(cc);
+  if (cond == gt || cond == le) {
+    cond = ReverseCondition(cond);
     lhs_is_smi = frame_->KnownSmiAt(0);
     rhs_is_smi = frame_->KnownSmiAt(1);
     lhs = frame_->PopToRegister();
@@ -1655,7 +1655,7 @@ void CodeGenerator::Comparison(Condition cc,
     // Perform non-smi comparison by stub.
     // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
     // We call with 0 args because there are 0 on the stack.
-    CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
+    CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
     frame_->CallStub(&stub, 0);
     __ cmp(r0, Operand(0, RelocInfo::NONE));
     exit.Jump();
@@ -1667,7 +1667,7 @@ void CodeGenerator::Comparison(Condition cc,
   __ cmp(lhs, Operand(rhs));
   exit.Bind();
-  cc_reg_ = cc;
+  cc_reg_ = cond;
 }
@@ -1885,8 +1885,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
 void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
   ASSERT(has_cc());
-  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
-  target->Branch(cc);
+  Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
+  target->Branch(cond);
   cc_reg_ = al;
 }
@@ -5572,7 +5572,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
   deferred->Branch(lt);
   __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
   __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
-  deferred->Branch(nz);
+  deferred->Branch(ne);
   // Check the object's elements are in fast case and writable.
   __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
@@ -5589,7 +5589,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
   __ mov(tmp2, index1);
   __ orr(tmp2, tmp2, index2);
   __ tst(tmp2, Operand(kSmiTagMask));
-  deferred->Branch(nz);
+  deferred->Branch(ne);
   // Check that both indices are valid.
   __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
......
@@ -32,12 +32,10 @@
 #include "constants-arm.h"
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
-namespace v8i = v8::internal;
-double Instr::DoubleImmedVmov() const {
+double Instruction::DoubleImmedVmov() const {
   // Reconstruct a double from the immediate encoded in the vmov instruction.
   //
   //   instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
@@ -149,6 +147,6 @@ int Registers::Number(const char* name) {
 }
-} }  // namespace assembler::arm
+} }  // namespace v8::internal
 #endif  // V8_TARGET_ARCH_ARM
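The bracketed comment in DoubleImmedVmov's body shows where the eight immediate bits abcdefgh sit in the instruction word. How they expand to a double is specified by the ARM VFP modified-immediate rule (VFPExpandImm64 in the ARM ARM); a sketch of that expansion, based on the architecture manual rather than on the V8 body, which this view truncates:

```cpp
// abcdefgh expands to: a NOT(b) bbbbbbbb cdefgh, then 48 zero mantissa bits.
#include <cstdint>
#include <cstring>
#include <cstdio>

double ExpandVmovImmediate(uint32_t abcdefgh) {  // low 8 bits used
  uint64_t a = (abcdefgh >> 7) & 1;
  uint64_t b = (abcdefgh >> 6) & 1;
  uint64_t cdefgh = abcdefgh & 0x3f;
  uint64_t bits = (a << 63) | ((b ^ 1) << 62) | ((b ? 0xffull : 0) << 54) |
                  (cdefgh << 48);
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // bit-cast to double
  return result;
}

int main() {
  // 0x70 encodes 1.0: a=0, b=1 -> biased exponent 0x3ff, mantissa 0.
  printf("%g\n", ExpandVmovImmediate(0x70));  // prints 1
}
```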
This diff is collapsed.
@@ -56,7 +56,7 @@ void CPU::FlushICache(void* start, size_t size) {
   // that the Icache was flushed.
   // None of this code ends up in the snapshot so there are no issues
   // around whether or not to generate the code when building snapshots.
-  assembler::arm::Simulator::FlushICache(start, size);
+  Simulator::FlushICache(start, size);
 #else
   // Ideally, we would call
   //   syscall(__ARM_NR_cacheflush, start,
......
This diff is collapsed.
@@ -517,16 +517,16 @@ void FullCodeGenerator::DoTest(Label* if_true,
 }
-void FullCodeGenerator::Split(Condition cc,
+void FullCodeGenerator::Split(Condition cond,
                               Label* if_true,
                               Label* if_false,
                               Label* fall_through) {
   if (if_false == fall_through) {
-    __ b(cc, if_true);
+    __ b(cond, if_true);
   } else if (if_true == fall_through) {
-    __ b(NegateCondition(cc), if_false);
+    __ b(NegateCondition(cond), if_false);
   } else {
-    __ b(cc, if_true);
+    __ b(cond, if_true);
     __ b(if_false);
   }
 }
@@ -3461,34 +3461,34 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
     default: {
       VisitForAccumulatorValue(expr->right());
-      Condition cc = eq;
+      Condition cond = eq;
       bool strict = false;
       switch (op) {
         case Token::EQ_STRICT:
           strict = true;
           // Fall through
         case Token::EQ:
-          cc = eq;
+          cond = eq;
           __ pop(r1);
           break;
         case Token::LT:
-          cc = lt;
+          cond = lt;
           __ pop(r1);
           break;
         case Token::GT:
           // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cc = lt;
+          cond = lt;
           __ mov(r1, result_register());
           __ pop(r0);
           break;
         case Token::LTE:
           // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cc = ge;
+          cond = ge;
           __ mov(r1, result_register());
           __ pop(r0);
           break;
         case Token::GTE:
-          cc = ge;
+          cond = ge;
           __ pop(r1);
           break;
         case Token::IN:
@@ -3503,17 +3503,17 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
         __ orr(r2, r0, Operand(r1));
         __ JumpIfNotSmi(r2, &slow_case);
         __ cmp(r1, r0);
-        Split(cc, if_true, if_false, NULL);
+        Split(cond, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
       CompareFlags flags = inline_smi_code
           ? NO_SMI_COMPARE_IN_STUB
          : NO_COMPARE_FLAGS;
-      CompareStub stub(cc, strict, flags, r1, r0);
+      CompareStub stub(cond, strict, flags, r1, r0);
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ cmp(r0, Operand(0, RelocInfo::NONE));
-      Split(cc, if_true, if_false, fall_through);
+      Split(cond, if_true, if_false, fall_through);
     }
   }
......
@@ -95,13 +95,13 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
   __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
   __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
                      (1 << Map::kHasNamedInterceptor)));
-  __ b(nz, miss);
+  __ b(ne, miss);
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
   __ cmp(t1, ip);
-  __ b(nz, miss);
+  __ b(ne, miss);
 }
@@ -427,7 +427,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
   __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
   __ tst(scratch,
          Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ b(nz, slow);
+  __ b(ne, slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
   // we enter the runtime system to make sure that indexing into string
@@ -1674,7 +1674,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
       return ge;
     default:
       UNREACHABLE();
-      return no_condition;
+      return kNoCondition;
   }
 }
......
@@ -76,7 +76,7 @@ void JumpTarget::DoJump() {
 }
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
+void JumpTarget::DoBranch(Condition cond, Hint ignored) {
   ASSERT(cgen()->has_valid_frame());
   if (entry_frame_set_) {
@@ -86,7 +86,7 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
       ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
     }
     // We have an expected frame to merge to on the backward edge.
-    cgen()->frame()->MergeTo(&entry_frame_, cc);
+    cgen()->frame()->MergeTo(&entry_frame_, cond);
   } else {
     // Clone the current frame to use as the expected one at the target.
     set_entry_frame(cgen()->frame());
@@ -98,8 +98,8 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
     // frame with less precise type info branches to them.
     ASSERT(direction_ != FORWARD_ONLY);
   }
-  __ b(cc, &entry_label_);
-  if (cc == al) {
+  __ b(cond, &entry_label_);
+  if (cond == al) {
     cgen()->DeleteFrame();
   }
 }
......
@@ -661,7 +661,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
     return;
   }
-  if (cc == no_condition) {
+  if (cc == kNoCondition) {
     if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
@@ -1216,7 +1216,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
     __ b(ne, &done);
     if (instr->InputAt(1)->IsConstantOperand()) {
       if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) {
-        DeoptimizeIf(no_condition, instr->environment());
+        DeoptimizeIf(kNoCondition, instr->environment());
       }
     } else {
       // Test the non-zero operand for negative sign.
@@ -1483,7 +1483,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
   if (r.IsInteger32()) {
     Register reg = ToRegister(instr->InputAt(0));
     __ cmp(reg, Operand(0));
-    EmitBranch(true_block, false_block, nz);
+    EmitBranch(true_block, false_block, ne);
   } else if (r.IsDouble()) {
     DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
     Register scratch = scratch0();
@@ -1541,7 +1541,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
       __ CallStub(&stub);
       __ cmp(reg, Operand(0));
       __ ldm(ia_w, sp, saved_regs);
-      EmitBranch(true_block, false_block, nz);
+      EmitBranch(true_block, false_block, ne);
     }
   }
 }
@@ -1593,7 +1593,7 @@ void LCodeGen::DoGoto(LGoto* instr) {
 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
-  Condition cond = no_condition;
+  Condition cond = kNoCondition;
   switch (op) {
     case Token::EQ:
     case Token::EQ_STRICT:
@@ -2136,7 +2136,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
       return ge;
     default:
      UNREACHABLE();
-      return no_condition;
+      return kNoCondition;
   }
 }
@@ -3556,7 +3556,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                  Label* false_label,
                                  Register input,
                                  Handle<String> type_name) {
-  Condition final_branch_condition = no_condition;
+  Condition final_branch_condition = kNoCondition;
   Register scratch = scratch0();
   if (type_name->Equals(Heap::number_symbol())) {
     __ tst(input, Operand(kSmiTagMask));
@@ -3641,7 +3641,7 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  DeoptimizeIf(no_condition, instr->environment());
+  DeoptimizeIf(kNoCondition, instr->environment());
 }
......
@@ -318,7 +318,7 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
   CheckConstPool(true, true);
   add(pc, pc, Operand(index,
                       LSL,
-                      assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
+                      Instruction::kInstrSizeLog2 - kSmiTagSize));
   BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
   nop();  // Jump table alignment.
   for (int i = 0; i < targets.length(); i++) {
@@ -369,12 +369,12 @@ void MacroAssembler::RecordWriteHelper(Register object,
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
-                                Condition cc,
+                                Condition cond,
                                 Label* branch) {
-  ASSERT(cc == eq || cc == ne);
+  ASSERT(cond == eq || cond == ne);
   and_(scratch, object, Operand(ExternalReference::new_space_mask()));
   cmp(scratch, Operand(ExternalReference::new_space_start()));
-  b(cc, branch);
+  b(cond, branch);
 }
@@ -926,7 +926,7 @@ void MacroAssembler::IsObjectJSStringType(Register object,
   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   tst(scratch, Operand(kIsNotStringMask));
-  b(nz, fail);
+  b(ne, fail);
 }
@@ -1806,9 +1806,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
 }
-void MacroAssembler::Assert(Condition cc, const char* msg) {
+void MacroAssembler::Assert(Condition cond, const char* msg) {
   if (FLAG_debug_code)
-    Check(cc, msg);
+    Check(cond, msg);
 }
@@ -1841,9 +1841,9 @@ void MacroAssembler::AssertFastElements(Register elements) {
 }
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cond, const char* msg) {
   Label L;
-  b(cc, &L);
+  b(cond, &L);
   Abort(msg);
   // will not return here
   bind(&L);
......
@@ -139,7 +139,7 @@ class MacroAssembler: public Assembler {
   // scratch can be object itself, but it will be clobbered.
   void InNewSpace(Register object,
                   Register scratch,
-                  Condition cc,  // eq for new space, ne otherwise
+                  Condition cond,  // eq for new space, ne otherwise
                   Label* branch);
@@ -685,14 +685,14 @@ class MacroAssembler: public Assembler {
   // ---------------------------------------------------------------------------
   // Debugging
-  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Calls Abort(msg) if the condition cond is not satisfied.
   // Use --debug_code to enable.
-  void Assert(Condition cc, const char* msg);
+  void Assert(Condition cond, const char* msg);
   void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
   void AssertFastElements(Register elements);
   // Like Assert(), but always enabled.
-  void Check(Condition cc, const char* msg);
+  void Check(Condition cond, const char* msg);
   // Print a message to stdout and abort execution.
   void Abort(const char* msg);
This diff is collapsed.
@@ -80,8 +80,8 @@ class SimulatorStack : public v8::internal::AllStatic {
 #include "constants-arm.h"
 #include "hashmap.h"
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
 class CachePage {
  public:
@@ -203,11 +203,11 @@ class Simulator {
   };
   // Unsupported instructions use Format to print an error and stop execution.
-  void Format(Instr* instr, const char* format);
+  void Format(Instruction* instr, const char* format);
   // Checks if the current instruction should be executed based on its
   // condition bits.
-  bool ConditionallyExecute(Instr* instr);
+  bool ConditionallyExecute(Instruction* instr);
   // Helper functions to set the conditional flags in the architecture state.
   void SetNZFlags(int32_t val);
@@ -225,13 +225,13 @@ class Simulator {
   void Copy_FPSCR_to_APSR();
   // Helper functions to decode common "addressing" modes
-  int32_t GetShiftRm(Instr* instr, bool* carry_out);
-  int32_t GetImm(Instr* instr, bool* carry_out);
-  void HandleRList(Instr* instr, bool load);
-  void SoftwareInterrupt(Instr* instr);
+  int32_t GetShiftRm(Instruction* instr, bool* carry_out);
+  int32_t GetImm(Instruction* instr, bool* carry_out);
+  void HandleRList(Instruction* instr, bool load);
+  void SoftwareInterrupt(Instruction* instr);
   // Stop helper functions.
-  inline bool isStopInstruction(Instr* instr);
+  inline bool isStopInstruction(Instruction* instr);
   inline bool isWatchedStop(uint32_t bkpt_code);
   inline bool isEnabledStop(uint32_t bkpt_code);
   inline void EnableStop(uint32_t bkpt_code);
@@ -245,41 +245,42 @@ class Simulator {
   inline void WriteB(int32_t addr, uint8_t value);
   inline void WriteB(int32_t addr, int8_t value);
-  inline uint16_t ReadHU(int32_t addr, Instr* instr);
-  inline int16_t ReadH(int32_t addr, Instr* instr);
+  inline uint16_t ReadHU(int32_t addr, Instruction* instr);
+  inline int16_t ReadH(int32_t addr, Instruction* instr);
   // Note: Overloaded on the sign of the value.
-  inline void WriteH(int32_t addr, uint16_t value, Instr* instr);
-  inline void WriteH(int32_t addr, int16_t value, Instr* instr);
+  inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
+  inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
-  inline int ReadW(int32_t addr, Instr* instr);
-  inline void WriteW(int32_t addr, int value, Instr* instr);
+  inline int ReadW(int32_t addr, Instruction* instr);
+  inline void WriteW(int32_t addr, int value, Instruction* instr);
   int32_t* ReadDW(int32_t addr);
   void WriteDW(int32_t addr, int32_t value1, int32_t value2);
   // Executing is handled based on the instruction type.
-  void DecodeType01(Instr* instr);  // both type 0 and type 1 rolled into one
-  void DecodeType2(Instr* instr);
-  void DecodeType3(Instr* instr);
-  void DecodeType4(Instr* instr);
-  void DecodeType5(Instr* instr);
-  void DecodeType6(Instr* instr);
-  void DecodeType7(Instr* instr);
+  // Both type 0 and type 1 rolled into one.
+  void DecodeType01(Instruction* instr);
+  void DecodeType2(Instruction* instr);
+  void DecodeType3(Instruction* instr);
+  void DecodeType4(Instruction* instr);
+  void DecodeType5(Instruction* instr);
+  void DecodeType6(Instruction* instr);
+  void DecodeType7(Instruction* instr);
   // Support for VFP.
-  void DecodeTypeVFP(Instr* instr);
-  void DecodeType6CoprocessorIns(Instr* instr);
-  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
-  void DecodeVCMP(Instr* instr);
-  void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
-  void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
+  void DecodeTypeVFP(Instruction* instr);
+  void DecodeType6CoprocessorIns(Instruction* instr);
+  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
+  void DecodeVCMP(Instruction* instr);
+  void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+  void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
   // Executes one instruction.
-  void InstructionDecode(Instr* instr);
+  void InstructionDecode(Instruction* instr);
   // ICache.
-  static void CheckICache(Instr* instr);
+  static void CheckICache(Instruction* instr);
   static void FlushOnePage(intptr_t start, int size);
   static CachePage* GetCachePage(void* page);
@@ -330,8 +331,8 @@ class Simulator {
   static v8::internal::HashMap* i_cache_;
   // Registered breakpoints.
-  Instr* break_pc_;
-  instr_t break_instr_;
+  Instruction* break_pc_;
+  Instr break_instr_;
   // A stop is watched if its code is less than kNumOfWatchedStops.
   // Only watched stops support enabling/disabling and the counter feature.
@@ -344,27 +345,22 @@ class Simulator {
   // instruction, if bit 31 of watched_stops[code].count is unset.
   // The value watched_stops[code].count & ~(1 << 31) indicates how many times
   // the breakpoint was hit or gone through.
-  struct StopCoundAndDesc {
+  struct StopCountAndDesc {
     uint32_t count;
     char* desc;
   };
-  StopCoundAndDesc watched_stops[kNumOfWatchedStops];
+  StopCountAndDesc watched_stops[kNumOfWatchedStops];
 };
-} }  // namespace assembler::arm
-namespace v8 {
-namespace internal {
 // When running with the simulator transition into simulated execution at this
 // point.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-  reinterpret_cast<Object*>(assembler::arm::Simulator::current()->Call( \
+  reinterpret_cast<Object*>(Simulator::current()->Call( \
       FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
-  assembler::arm::Simulator::current()->Call( \
+  Simulator::current()->Call( \
       FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@@ -380,16 +376,16 @@ namespace internal {
 class SimulatorStack : public v8::internal::AllStatic {
  public:
   static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
-    return assembler::arm::Simulator::current()->StackLimit();
+    return Simulator::current()->StackLimit();
   }
   static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
-    assembler::arm::Simulator* sim = assembler::arm::Simulator::current();
+    Simulator* sim = Simulator::current();
     return sim->PushAddress(try_catch_address);
   }
   static inline void UnregisterCTryCatch() {
-    assembler::arm::Simulator::current()->PopAddress();
+    Simulator::current()->PopAddress();
   }
 };
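The CALL_GENERATED_CODE macros above are the seam where simulator builds diverge from native ones: the same call site either jumps straight into generated code or routes through Simulator::current()->Call, which after this patch lives in v8::internal like its callers. A toy illustration of the dispatch pattern (not V8 code; the "simulator" here just calls a host function instead of interpreting ARM instructions):

```cpp
#include <cstdio>

struct Simulator {
  static Simulator* current() { static Simulator s; return &s; }
  // Pretend to "execute" generated code at `entry` with two arguments.
  int Call(int (*entry)(int, int), int a, int b) {
    printf("simulating call\n");
    return entry(a, b);  // a real simulator would interpret ARM instructions
  }
};

#define USE_SIMULATOR 1
#if USE_SIMULATOR
#define CALL_GENERATED_CODE(entry, p0, p1) \
  Simulator::current()->Call(entry, p0, p1)
#else
#define CALL_GENERATED_CODE(entry, p0, p1) (entry)(p0, p1)
#endif

static int add(int a, int b) { return a + b; }

int main() {
  printf("%d\n", CALL_GENERATED_CODE(add, 2, 3));  // prints 5
}
```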
......
@@ -72,7 +72,7 @@ void ThreadLocalTop::Initialize() {
   handler_ = 0;
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
-  simulator_ = assembler::arm::Simulator::current();
+  simulator_ = Simulator::current();
 #elif V8_TARGET_ARCH_MIPS
   simulator_ = assembler::mips::Simulator::current();
 #endif
@@ -1095,7 +1095,7 @@ char* Top::RestoreThread(char* from) {
   // thread_local_ is restored on a separate OS thread.
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
-  thread_local_.simulator_ = assembler::arm::Simulator::current();
+  thread_local_.simulator_ = Simulator::current();
 #elif V8_TARGET_ARCH_MIPS
   thread_local_.simulator_ = assembler::mips::Simulator::current();
 #endif
......
@@ -109,7 +109,7 @@ class ThreadLocalTop BASE_EMBEDDED {
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
-  assembler::arm::Simulator* simulator_;
+  Simulator* simulator_;
 #elif V8_TARGET_ARCH_MIPS
   assembler::mips::Simulator* simulator_;
 #endif
......
@@ -79,7 +79,7 @@ bool V8::Initialize(Deserializer* des) {
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
 #if defined(V8_TARGET_ARCH_ARM)
-  ::assembler::arm::Simulator::Initialize();
+  Simulator::Initialize();
 #elif defined(V8_TARGET_ARCH_MIPS)
   ::assembler::mips::Simulator::Initialize();
 #endif
......