Commit 2e257d6c authored by Jacob.Bramley@arm.com

Reland r22247 "ARM64: Use UntagSmi helpers and clean up assertions."

BUG=
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/371033002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22282 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 41b74fd2
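
For context: on ARM64, V8 stores a smi's signed 32-bit payload in the upper half of a 64-bit X register (kSmiTag == 0, kSmiShift == 32, kSmiValueSize == 32), which is exactly what the assertions added below pin down. The following is a minimal standalone sketch of the arithmetic the UntagSmi helpers encapsulate, not the V8 implementation; it assumes those constants and an arithmetic right shift for negative values, as ARM64's ASR provides. Note also that the helpers use ASR where some of the replaced call sites used LSR, which only agrees for non-negative payloads such as argument counts.

#include <cassert>
#include <cstdint>

// Assumed ARM64 smi layout (matching the STATIC_ASSERTs in this patch):
// tag bit 0 is zero, signed 32-bit payload in bits [63:32].
const unsigned kSmiShift = 32;
const unsigned kSmiValueSize = 32;

// SmiTag is Lsl(dst, src, kSmiShift); cast via uint64_t to keep the
// left shift of a negative payload well defined.
int64_t SmiTag(int64_t value) {
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShift);
}

// Operand::UntagSmi(smi) is Operand(smi, ASR, kSmiShift): one arithmetic
// shift recovers the signed payload.
int64_t UntagSmi(int64_t smi) { return smi >> kSmiShift; }

// Operand::UntagSmiAndScale(smi, scale) folds untagging and scaling by
// 2^scale into a single shift, ASR by (kSmiShift - scale), for the common
// case scale <= kSmiShift (the helper also handles larger scales with LSL).
int64_t UntagSmiAndScale(int64_t smi, int scale) {
  return smi >> (kSmiShift - static_cast<unsigned>(scale));
}

int main() {
  assert(UntagSmi(SmiTag(-7)) == -7);
  assert(UntagSmiAndScale(SmiTag(5), 3) == 5 * 8);  // untag, then scale by 8
  return 0;
}
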
@@ -430,12 +430,16 @@ unsigned Operand::shift_amount() const {
 Operand Operand::UntagSmi(Register smi) {
+  STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
+                                                         kSmiValueSize));
   ASSERT(smi.Is64Bits());
   return Operand(smi, ASR, kSmiShift);
 }

 Operand Operand::UntagSmiAndScale(Register smi, int scale) {
+  STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
+                                                         kSmiValueSize));
   ASSERT(smi.Is64Bits());
   ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
   if (scale > kSmiShift) {
......
@@ -1246,7 +1246,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // TODO(jbramley): Check that the stack usage here is safe.
     __ Sub(x10, jssp, x10);
     // Check if the arguments will overflow the stack.
-    __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
+    __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
     __ B(gt, &enough_stack_space);
     // There is not enough stack space, so use a builtin to throw an appropriate
     // error.
......
@@ -1988,9 +1988,8 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
   Register caller_fp = x10;
   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   // Load and untag the context.
-  STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
-  __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
-                         (kSmiShift / kBitsPerByte)));
+  __ Ldr(w11, UntagSmiMemOperand(caller_fp,
+                                 StandardFrameConstants::kContextOffset));
   __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
   __ B(ne, &runtime);
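
UntagSmiMemOperand replaces the hand-computed offset above: because the payload sits in the upper 4 bytes of the 8-byte smi slot, a W-sized load at offset + kSmiShift / kBitsPerByte reads the already-untagged value on a little-endian target. A hedged sketch of that equivalence (plain C++, not the V8 helper; assumes little-endian, as ARM64 runs here):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const unsigned kSmiShift = 32;
  const unsigned kBitsPerByte = 8;

  int64_t tagged = int64_t{123} << kSmiShift;  // SmiTag(123), in memory
  uint8_t slot[8];
  std::memcpy(slot, &tagged, 8);

  // A 32-bit load at offset kSmiShift / kBitsPerByte == 4 picks out just
  // the payload half of the slot: the untagged value.
  int32_t payload;
  std::memcpy(&payload, slot + kSmiShift / kBitsPerByte, sizeof(payload));
  assert(payload == 123);
  return 0;
}
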
@@ -2838,8 +2837,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Store the smi values in the last match info.
   __ SmiTag(x10, current_offset);
   // Clearing the 32 bottom bits gives us a Smi.
-  STATIC_ASSERT(kSmiShift == 32);
-  __ And(x11, current_offset, ~kWRegMask);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Bic(x11, current_offset, kSmiShiftMask);
   __ Stp(x10,
          x11,
          MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
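
Bic (bit clear) computes dst = src & ~mask, so Bic(x11, current_offset, kSmiShiftMask) zeroes exactly the low kSmiShift bits: the same bits the old And(x11, current_offset, ~kWRegMask) cleared, but named in terms of the smi constants rather than the W-register width. A small sketch of the equivalence, assuming kSmiShiftMask == (1 << kSmiShift) - 1:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned kSmiShift = 32;                          // assumed value
  const uint64_t kSmiShiftMask = (uint64_t{1} << kSmiShift) - 1;
  const uint64_t kWRegMask = 0xffffffffULL;               // W register bits

  uint64_t current_offset = 0x0000002a0000002bULL;        // arbitrary input
  uint64_t bic_result = current_offset & ~kSmiShiftMask;  // Bic(..., kSmiShiftMask)
  uint64_t and_result = current_offset & ~kWRegMask;      // old And(..., ~kWRegMask)

  assert(bic_result == and_result);            // same bits cleared
  assert((bic_result & kSmiShiftMask) == 0);   // low bits zero: a valid smi
  return 0;
}
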
@@ -3478,8 +3477,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   // At this point code register contains smi tagged ASCII char code.
-  STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
-  __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
+  __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
   __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
   __ Bind(&exit_);
@@ -3848,7 +3846,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
   // hash = character + (character << 10);
   __ LoadRoot(hash, Heap::kHashSeedRootIndex);
   // Untag smi seed and add the character.
-  __ Add(hash, character, Operand(hash, LSR, kSmiShift));
+  __ Add(hash, character, Operand::UntagSmi(hash));

   // Compute hashes modulo 2^32 using a 32-bit W register.
   Register hash_w = hash.W();
......
@@ -171,7 +171,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
     //   jssp[8]: 0x00000000 (SMI tag & padding)
     //   jssp[4]: reg[31:0]
     //   jssp[0]: 0x00000000 (SMI tag & padding)
-    STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == kWRegSizeInBits);
   }

   if (object_regs != 0) {
......
@@ -2029,11 +2029,12 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
       break;
     case Token::MUL: {
       Label not_minus_zero, done;
+      STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == (kXRegSizeInBits / 2));
+      STATIC_ASSERT(kSmiTag == 0);
       __ Smulh(x10, left, right);
       __ Cbnz(x10, &not_minus_zero);
       __ Eor(x11, left, right);
       __ Tbnz(x11, kXSignBit, &stub_call);
-      STATIC_ASSERT(kSmiTag == 0);
       __ Mov(result, x10);
       __ B(&done);
       __ Bind(&not_minus_zero);
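
The Smulh trick above relies on kSmiShift being exactly half the register width, which is what the new first assertion pins down: for tagged inputs x << 32 and y << 32, the 128-bit product is (x * y) << 64, so the high half returned by Smulh is the untagged product, ready for the minus-zero and range checks that follow. A sketch of that identity (uses the __int128 extension available in GCC/Clang; not V8 code):

#include <cassert>
#include <cstdint>

// Smulh returns the high 64 bits of the signed 128-bit product.
int64_t Smulh(int64_t a, int64_t b) {
  return static_cast<int64_t>((static_cast<__int128>(a) * b) >> 64);
}

int main() {
  const unsigned kSmiShift = 32;  // asserted to be kXRegSizeInBits / 2
  int64_t left = int64_t{6} << kSmiShift;                           // SmiTag(6)
  int64_t right = static_cast<int64_t>(uint64_t(-7) << kSmiShift);  // SmiTag(-7)

  // (x << 32) * (y << 32) == (x * y) << 64, so Smulh yields the plain
  // product of the untagged values, which the code then re-tags.
  assert(Smulh(left, right) == 6 * -7);
  return 0;
}
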
@@ -2592,9 +2593,10 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);

+  uint64_t sign_mask = V8_UINT64_C(1) << (kSmiShift + kSmiValueSize - 1);
+
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
-                  if_false, fall_through);
+  __ TestAndSplit(x0, kSmiTagMask | sign_mask, if_true, if_false, fall_through);

   context()->Plug(if_true, if_false);
 }
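
The named sign_mask makes the magic number explicit: bit kSmiShift + kSmiValueSize - 1 == 63 is the X-register sign bit, i.e. the tagged payload's sign. TestAndSplit then branches on kSmiTagMask | sign_mask: both bits clear means "is a smi and non-negative". (V8_UINT64_C also avoids the old expression's shift of a possibly 32-bit unsigned long by 32.) A small worked check, assuming the ARM64 constants:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned kSmiShift = 32, kSmiValueSize = 32;  // assumed ARM64 values
  const uint64_t kSmiTagMask = 1;                     // tag occupies bit 0

  uint64_t sign_mask = uint64_t{1} << (kSmiShift + kSmiValueSize - 1);
  assert(sign_mask == 0x8000000000000000ULL);         // X-register sign bit

  uint64_t non_negative_smi = uint64_t{42} << kSmiShift;  // SmiTag(42)
  uint64_t negative_smi = uint64_t(-7) << kSmiShift;      // SmiTag(-7)
  uint64_t not_a_smi = (uint64_t{42} << kSmiShift) | 1;   // tag bit set

  // TestAndSplit(x0, kSmiTagMask | sign_mask, ...): all bits clear iff
  // the value is a smi and its payload is non-negative.
  assert((non_negative_smi & (kSmiTagMask | sign_mask)) == 0);
  assert((negative_smi & (kSmiTagMask | sign_mask)) != 0);
  assert((not_a_smi & (kSmiTagMask | sign_mask)) != 0);
  return 0;
}
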
......
@@ -3525,7 +3525,8 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
                                               ElementsKind elements_kind,
                                               Representation representation,
                                               int base_offset) {
-  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+  STATIC_ASSERT(kSmiTag == 0);
   int element_size_shift = ElementsKindToShiftSize(elements_kind);

   // Even though the HLoad/StoreKeyed instructions force the input
@@ -3536,8 +3537,7 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
     __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
     if (representation.IsInteger32()) {
       ASSERT(elements_kind == FAST_SMI_ELEMENTS);
-      // Read or write only the most-significant 32 bits in the case of fast smi
-      // arrays.
+      // Read or write only the smi payload in the case of fast smi arrays.
       return UntagSmiMemOperand(base, base_offset);
     } else {
       return MemOperand(base, base_offset);
@@ -3548,8 +3548,7 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
     ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
     if (representation.IsInteger32()) {
       ASSERT(elements_kind == FAST_SMI_ELEMENTS);
-      // Read or write only the most-significant 32 bits in the case of fast smi
-      // arrays.
+      // Read or write only the smi payload in the case of fast smi arrays.
       __ Add(base, elements, Operand(key, SXTW, element_size_shift));
       return UntagSmiMemOperand(base, base_offset);
     } else {
@@ -3612,8 +3611,8 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
                  ToInteger32(const_operand) * kPointerSize;
     if (representation.IsInteger32()) {
       ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
-                    (kSmiTag == 0));
+      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+      STATIC_ASSERT(kSmiTag == 0);
       mem_op = UntagSmiMemOperand(elements, offset);
     } else {
       mem_op = MemOperand(elements, offset);
@@ -3683,7 +3682,8 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   if (access.representation().IsSmi() &&
       instr->hydrogen()->representation().IsInteger32()) {
     // Read int value directly from upper half of the smi.
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+    STATIC_ASSERT(kSmiTag == 0);
     __ Load(result, UntagSmiFieldMemOperand(source, offset),
             Representation::Integer32());
   } else {
@@ -5286,8 +5286,8 @@ void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
     if (representation.IsInteger32()) {
       ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
       ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
-                    (kSmiTag == 0));
+      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+      STATIC_ASSERT(kSmiTag == 0);
       mem_op = UntagSmiMemOperand(store_base, offset);
     } else {
       mem_op = MemOperand(store_base, offset);
@@ -5406,7 +5406,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
       __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
     }
 #endif
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+    STATIC_ASSERT(kSmiTag == 0);
     __ Store(value, UntagSmiFieldMemOperand(destination, offset),
              Representation::Integer32());
   } else {
......
@@ -1308,6 +1308,8 @@ void MacroAssembler::InitializeRootRegister() {
 void MacroAssembler::SmiTag(Register dst, Register src) {
+  STATIC_ASSERT(kXRegSizeInBits ==
+                static_cast<unsigned>(kSmiShift + kSmiValueSize));
   ASSERT(dst.Is64Bits() && src.Is64Bits());
   Lsl(dst, src, kSmiShift);
 }
@@ -1317,6 +1319,8 @@ void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
 void MacroAssembler::SmiUntag(Register dst, Register src) {
+  STATIC_ASSERT(kXRegSizeInBits ==
+                static_cast<unsigned>(kSmiShift + kSmiValueSize));
   ASSERT(dst.Is64Bits() && src.Is64Bits());
   if (FLAG_enable_slow_asserts) {
     AssertSmi(src);
@@ -1351,13 +1355,17 @@ void MacroAssembler::SmiUntagToFloat(FPRegister dst,
 void MacroAssembler::SmiTagAndPush(Register src) {
-  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+  STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
+                (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
+                (kSmiTag == 0));
   Push(src.W(), wzr);
 }

 void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
-  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+  STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
+                (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
+                (kSmiTag == 0));
   Push(src1.W(), wzr, src2.W(), wzr);
 }
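
SmiTagAndPush avoids a separate tagging instruction by exploiting the layout: pushing the 32-bit payload together with wzr writes the payload above 32 bits of zeros, and the resulting X-sized stack slot is exactly the tagged smi. That only works if the payload and the zero half are each exactly one W register wide, hence the strengthened assertion. A little-endian sketch of the slot layout (plain C++, mirroring the debug-arm64.cc comment earlier in this patch):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // Simulate Push(src.W(), wzr) into one 8-byte stack slot: V8's Push
  // places the first operand at the higher address on the descending stack.
  uint32_t payload = 42;  // src.W()
  uint32_t zeros = 0;     // wzr
  uint8_t slot[8];
  std::memcpy(slot + 0, &zeros, 4);    // jssp[0..3]: 0x00000000 (tag & padding)
  std::memcpy(slot + 4, &payload, 4);  // jssp[4..7]: reg[31:0]

  uint64_t tagged;
  std::memcpy(&tagged, slot, 8);       // read the slot back as one X register
  assert(tagged == (uint64_t{42} << 32));  // == SmiTag(42)
  return 0;
}
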
......