Commit 7a45af14 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Unify SmiTag/SmiUntag operations in hand-written assembly code.

The result of SmiUntag is a sign-extended word-size value.
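For readers unfamiliar with the Smi encoding, here is a minimal standalone C++ sketch of the scheme these macros implement, assuming the 64-bit layout with kSmiShift == 32 that the deleted helpers' STATIC_ASSERTs document. The names below are illustrative only and are not V8 API.

  #include <cstdint>

  // Assumption: 64-bit word with the Smi payload in the upper 32 bits
  // (kSmiShift == 32), as documented by the deleted STATIC_ASSERTs.
  constexpr int kSmiShift = 32;

  // Tag a word-size value: shift the payload into the upper half-word.
  inline int64_t SmiTag(int64_t value) {
    return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShift);
  }

  // Untag: arithmetic shift right (as on the compilers V8 targets), so the
  // payload comes back sign-extended to the full word size -- the invariant
  // this commit standardizes on.
  inline int64_t SmiUntag(int64_t smi) { return smi >> kSmiShift; }

  int main() {
    // -1 round-trips to a sign-extended -1, not to 0xFFFFFFFF.
    return SmiUntag(SmiTag(-1)) == -1 ? 0 : 1;
  }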

Bug: v8:7703
Change-Id: I85dc87b541cab78286e47e2147c16c6a0939f509
Reviewed-on: https://chromium-review.googlesource.com/1073232
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53389}
parent 7032b91d
@@ -24,17 +24,6 @@ MemOperand FieldMemOperand(Register object, int offset) {
 }
 
-MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
-  return UntagSmiMemOperand(object, offset - kHeapObjectTag);
-}
-
-MemOperand UntagSmiMemOperand(Register object, int offset) {
-  // Assumes that Smis are shifted by 32 bits and little endianness.
-  STATIC_ASSERT(kSmiShift == 32);
-  return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
-}
-
 void TurboAssembler::And(const Register& rd, const Register& rn,
                          const Operand& operand) {
   DCHECK(allow_macro_instructions());
@@ -1063,23 +1052,27 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
   Asr(dst, src, kSmiShift);
 }
 
-void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
-
-void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src) {
-  DCHECK(dst.Is64Bits() && src.Is64Bits());
-  if (FLAG_enable_slow_asserts) {
-    AssertSmi(src);
-  }
-  Scvtf(dst, src, kSmiShift);
-}
-
-void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src) {
-  DCHECK(dst.Is32Bits() && src.Is64Bits());
-  if (FLAG_enable_slow_asserts) {
-    AssertSmi(src);
-  }
-  Scvtf(dst, src, kSmiShift);
-}
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+  STATIC_ASSERT(kXRegSizeInBits ==
+                static_cast<unsigned>(kSmiShift + kSmiValueSize));
+  DCHECK(dst.Is64Bits());
+  if (src.IsImmediateOffset() && src.shift_amount() == 0) {
+    if (FLAG_enable_slow_asserts) {
+      Ldr(dst, src);
+      AssertSmi(dst);
+    }
+    // Load value directly from the upper half-word.
+    // Assumes that Smis are shifted by 32 bits and little endianness.
+    DCHECK_EQ(kSmiShift, 32);
+    Ldrsw(dst, MemOperand(src.base(), src.offset() + (kSmiShift / kBitsPerByte),
+                          src.addrmode()));
+  } else {
+    Ldr(dst, src);
+    SmiUntag(dst);
+  }
+}
+
+void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
 
 void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
                                Label* not_smi_label) {
......
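A hedged note on the MemOperand fast path above, not part of the diff: with kSmiShift == 32 and a little-endian layout, the Smi payload occupies the upper 32-bit half-word of the 64-bit slot, so a sign-extending 32-bit load (Ldrsw) from offset + 4 produces the same sign-extended result as loading the whole word and shifting. The C++ model below is illustrative; the names and offsets are assumptions for a little-endian 64-bit host.

  #include <cstdint>
  #include <cstring>

  // Untag by loading the full word and arithmetic-shifting right by 32.
  int64_t UntagByShift(const int64_t* slot) { return *slot >> 32; }

  // Untag by reading only the upper half-word with a sign-extending
  // 32-bit load (the Ldrsw analogue), i.e. offset + 4 on little-endian.
  int64_t UntagByHalfwordLoad(const int64_t* slot) {
    int32_t upper;
    std::memcpy(&upper, reinterpret_cast<const char*>(slot) + 4, sizeof(upper));
    return static_cast<int64_t>(upper);  // sign-extended to word size
  }

  int main() {
    const int64_t smi =
        static_cast<int64_t>(static_cast<uint64_t>(int64_t{-42}) << 32);
    return UntagByShift(&smi) == UntagByHalfwordLoad(&smi) ? 0 : 1;
  }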
@@ -97,11 +97,6 @@ namespace internal {
 // Generate a MemOperand for loading a field from an object.
 inline MemOperand FieldMemOperand(Register object, int offset);
 
-inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
-
-// Generate a MemOperand for loading a SMI from memory.
-inline MemOperand UntagSmiMemOperand(Register object, int offset);
-
 // ----------------------------------------------------------------------------
 // MacroAssembler
@@ -587,6 +582,7 @@ class TurboAssembler : public Assembler {
                       Register scratch1);
 
   inline void SmiUntag(Register dst, Register src);
+  inline void SmiUntag(Register dst, const MemOperand& src);
   inline void SmiUntag(Register smi);
 
   // Calls Abort(msg) if the condition cond is not satisfied.
@@ -1716,8 +1712,6 @@ class MacroAssembler : public TurboAssembler {
   inline void SmiTag(Register dst, Register src);
   inline void SmiTag(Register smi);
 
-  inline void SmiUntagToDouble(VRegister dst, Register src);
-  inline void SmiUntagToFloat(VRegister dst, Register src);
-
   inline void JumpIfNotSmi(Register value, Label* not_smi_label);
   inline void JumpIfBothSmi(Register value1, Register value2,
......
@@ -266,8 +266,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
     // Restore smi-tagged arguments count from the frame. Use fp relative
     // addressing to avoid the circular dependency between padding existence and
    // argc parity.
-    __ Ldrsw(x1,
-             UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
+    __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
 
     // Leave construct frame.
   }
@@ -351,8 +350,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
     // Restore constructor function and argument count.
     __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
-    __ Ldrsw(x12,
-             UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
+    __ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
 
     // Copy arguments to the expression stack. The called function pops the
     // receiver along with its arguments, so we need an extra receiver on the
@@ -451,8 +449,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
     __ Bind(&leave_frame);
     // Restore smi-tagged arguments count from the frame.
-    __ Ldrsw(x1,
-             UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
+    __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
     // Leave construct frame.
   }
 
   // Remove caller arguments from the stack and return.
@@ -1124,7 +1121,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
   __ Mov(kInterpreterBytecodeArrayRegister, x10);
-  __ Ldr(x10, UntagSmiFieldMemOperand(x11, DebugInfo::kFlagsOffset));
+  __ SmiUntag(x10, FieldMemOperand(x11, DebugInfo::kFlagsOffset));
   __ And(x10, x10, Immediate(DebugInfo::kDebugExecutionMode));
   STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
@@ -1659,7 +1656,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
     __ JumpIfSmi(x0, &failed);
 
     // Peek the argument count from the stack, untagging at the same time.
-    __ Ldr(w4, UntagSmiMemOperand(sp, 3 * kPointerSize));
+    __ SmiUntag(x4, MemOperand(sp, 3 * kPointerSize));
     __ Drop(4);
     scope.GenerateLeaveFrame();
@@ -1803,9 +1800,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
   // Load the OSR entrypoint offset from the deoptimization data.
   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
-  __ Ldrsw(w1, UntagSmiFieldMemOperand(
-                   x1, FixedArray::OffsetOfElementAt(
-                           DeoptimizationData::kOsrPcOffsetIndex)));
+  __ SmiUntag(x1,
+              FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
+                                      DeoptimizationData::kOsrPcOffsetIndex)));
 
   // Compute the target address = code_obj + header_size + osr_offset
   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -2340,9 +2337,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
     __ Bind(&arguments_adaptor);
     {
       // Just load the length from ArgumentsAdaptorFrame.
-      __ Ldrsw(len,
-               UntagSmiMemOperand(
-                   args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+      __ SmiUntag(
+          len,
+          MemOperand(args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
     }
     __ Bind(&arguments_done);
   }
@@ -2489,8 +2486,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
   Label no_bound_arguments;
   __ Ldr(bound_argv,
          FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
-  __ Ldrsw(bound_argc,
-           UntagSmiFieldMemOperand(bound_argv, FixedArray::kLengthOffset));
+  __ SmiUntag(bound_argc,
+              FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
   __ Cbz(bound_argc, &no_bound_arguments);
   {
     // ----------- S t a t e -------------
@@ -3620,9 +3617,9 @@ void Builtins::Generate_ArrayConstructorImpl(MacroAssembler* masm) {
   // Get the elements kind and case on that.
   __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
-  __ Ldrsw(kind, UntagSmiFieldMemOperand(
-                     allocation_site,
-                     AllocationSite::kTransitionInfoOrBoilerplateOffset));
+  __ SmiUntag(kind, FieldMemOperand(
+                        allocation_site,
+                        AllocationSite::kTransitionInfoOrBoilerplateOffset));
 
   __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
......
@@ -1230,9 +1230,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   }
 
   // Get the target bytecode offset from the frame.
-  __ Lw(
-      kInterpreterBytecodeOffsetRegister,
-      UntagSmiMemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
 
   // Dispatch to the target bytecode.
   __ Daddu(a1, kInterpreterBytecodeArrayRegister,
@@ -1305,9 +1304,7 @@ static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
   // IsSmi: Is builtin
   __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
   __ li(scratch1, ExternalReference::builtins_address(masm->isolate()));
-  // Avoid untagging the Smi by merging the shift
-  STATIC_ASSERT(kPointerSizeLog2 < kSmiShift);
-  __ dsrl(sfi_data, sfi_data, kSmiShift - kPointerSizeLog2);
+  __ SmiScale(sfi_data, sfi_data, kPointerSizeLog2);
   __ Daddu(scratch1, scratch1, sfi_data);
   __ Ld(sfi_data, MemOperand(scratch1));
   __ Branch(&done);
@@ -1638,9 +1635,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
   // Load the OSR entrypoint offset from the deoptimization data.
   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
-  __ Lw(a1, UntagSmiMemOperand(a1, FixedArray::OffsetOfElementAt(
-                                   DeoptimizationData::kOsrPcOffsetIndex) -
-                                   kHeapObjectTag));
+  __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+                                     DeoptimizationData::kOsrPcOffsetIndex) -
+                                     kHeapObjectTag));
 
   // Compute the target address = code_obj + header_size + osr_offset
   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -1887,8 +1884,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
 }
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
-  // __ sll(a0, a0, kSmiTagSize);
-  __ dsll32(a0, a0, 0);
+  __ SmiTag(a0);
   __ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
   __ Push(Smi::kZero);  // Padding.
@@ -2015,8 +2011,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   __ bind(&arguments_adaptor);
   {
     // Just get the length from the ArgumentsAdaptorFrame.
-    __ Lw(a7, UntagSmiMemOperand(
-                  a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ SmiUntag(a7,
+                MemOperand(a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
   }
   __ bind(&arguments_done);
@@ -2174,7 +2170,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
   // Load [[BoundArguments]] into a2 and length of that into a4.
   __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
-  __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
 
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
@@ -2221,7 +2217,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
     // Copy [[BoundArguments]] to the stack (below the arguments).
     {
       Label loop, done_loop;
-      __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+      __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
       __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
       __ bind(&loop);
       __ Dsubu(a4, a4, Operand(1));
@@ -2327,7 +2323,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
   // Load [[BoundArguments]] into a2 and length of that into a4.
   __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
-  __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
 
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
@@ -2375,7 +2371,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     // Copy [[BoundArguments]] to the stack (below the arguments).
    {
       Label loop, done_loop;
-      __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+      __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
       __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
       __ bind(&loop);
       __ Dsubu(a4, a4, Operand(1));
......
@@ -563,9 +563,8 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
   // Load arguments count from current arguments adaptor frame (note, it
   // does not include receiver).
   Register caller_args_count_reg = scratch1;
-  __ SmiToInteger32(
-      caller_args_count_reg,
-      Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg,
+              Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
 
   ParameterCount callee_args_count(args_reg);
   __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
......
@@ -5346,9 +5346,9 @@ void MacroAssembler::AssertStackIsAligned() {
   }
 }
 
-void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
   if (SmiValuesAre32Bits()) {
-    Lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+    Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
   } else {
     Lw(dst, src);
     SmiUntag(dst);
......
@@ -131,18 +131,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
 }
 
-inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
-  // Assumes that Smis are shifted by 32 bits.
-  STATIC_ASSERT(kSmiShift == 32);
-  return MemOperand(rm, SmiWordOffset(offset));
-}
-
-inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
-  return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
-}
-
 // Generate a MemOperand for storing arguments 5..N on the stack
 // when calling CallCFunction().
 // TODO(plind): Currently ONLY used for O32. Should be fixed for
@@ -508,6 +496,7 @@ class TurboAssembler : public Assembler {
 #undef DEFINE_INSTRUCTION2
 #undef DEFINE_INSTRUCTION3
 
+  void SmiUntag(Register dst, const MemOperand& src);
   void SmiUntag(Register dst, Register src) {
     if (SmiValuesAre32Bits()) {
       STATIC_ASSERT(kSmiShift == 32);
@@ -1199,9 +1188,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
     }
   }
 
-  // Combine load with untagging or scaling.
-  void SmiLoadUntag(Register dst, MemOperand src);
-
   // Test if the register contains a smi.
   inline void SmiTst(Register value, Register scratch) {
     And(scratch, value, Operand(kSmiTagMask));
......
@@ -1098,48 +1098,31 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) {
   movp(dst, ext.address(), RelocInfo::EXTERNAL_REFERENCE);
 }
 
-void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
+void MacroAssembler::SmiTag(Register dst, Register src) {
   STATIC_ASSERT(kSmiTag == 0);
   if (dst != src) {
-    movl(dst, src);
+    movp(dst, src);
   }
   shlp(dst, Immediate(kSmiShift));
 }
 
-void TurboAssembler::SmiToInteger32(Register dst, Register src) {
+void TurboAssembler::SmiUntag(Register dst, Register src) {
   STATIC_ASSERT(kSmiTag == 0);
   if (dst != src) {
     movp(dst, src);
   }
-  if (SmiValuesAre32Bits()) {
-    shrp(dst, Immediate(kSmiShift));
-  } else {
-    DCHECK(SmiValuesAre31Bits());
-    sarl(dst, Immediate(kSmiShift));
-  }
+  sarp(dst, Immediate(kSmiShift));
 }
 
-void TurboAssembler::SmiToInteger32(Register dst, Operand src) {
+void TurboAssembler::SmiUntag(Register dst, Operand src) {
   if (SmiValuesAre32Bits()) {
     movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+    // Sign extend to 64-bit.
+    movsxlq(dst, dst);
   } else {
     DCHECK(SmiValuesAre31Bits());
-    movl(dst, src);
-    sarl(dst, Immediate(kSmiShift));
-  }
-}
-
-void MacroAssembler::SmiToInteger64(Register dst, Register src) {
-  STATIC_ASSERT(kSmiTag == 0);
-  if (dst != src) {
     movp(dst, src);
-  }
-  sarp(dst, Immediate(kSmiShift));
-  if (kPointerSize == kInt32Size) {
-    // Sign extend to 64-bit.
-    movsxlq(dst, dst);
+    sarp(dst, Immediate(kSmiShift));
   }
 }
@@ -2385,13 +2368,13 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
     if (expected.is_reg()) {
-      Integer32ToSmi(expected.reg(), expected.reg());
+      SmiTag(expected.reg(), expected.reg());
       Push(expected.reg());
     }
     if (actual.is_reg()) {
-      Integer32ToSmi(actual.reg(), actual.reg());
+      SmiTag(actual.reg(), actual.reg());
       Push(actual.reg());
-      SmiToInteger64(actual.reg(), actual.reg());
+      SmiUntag(actual.reg(), actual.reg());
     }
     if (new_target.is_valid()) {
       Push(new_target);
@@ -2406,11 +2389,11 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
     }
     if (actual.is_reg()) {
       Pop(actual.reg());
-      SmiToInteger64(actual.reg(), actual.reg());
+      SmiUntag(actual.reg(), actual.reg());
    }
     if (expected.is_reg()) {
       Pop(expected.reg());
-      SmiToInteger64(expected.reg(), expected.reg());
+      SmiUntag(expected.reg(), expected.reg());
     }
   }
   bind(&skip_hook);
......
@@ -357,10 +357,9 @@ class TurboAssembler : public Assembler {
     movp(dst, ptr, rmode);
   }
 
-  // Convert smi to 32-bit integer. I.e., not sign extended into
-  // high 32 bits of destination.
-  void SmiToInteger32(Register dst, Register src);
-  void SmiToInteger32(Register dst, Operand src);
+  // Convert smi to word-size sign-extended value.
+  void SmiUntag(Register dst, Register src);
+  void SmiUntag(Register dst, Operand src);
 
   // Loads the address of the external reference into the destination
   // register.
@@ -681,13 +680,8 @@ class MacroAssembler : public TurboAssembler {
   // ---------------------------------------------------------------------------
   // Conversions between tagged smi values and non-tagged integer values.
 
-  // Tag an integer value. The result must be known to be a valid smi value.
-  // Only uses the low 32 bits of the src register. Sets the N and Z flags
-  // based on the value of the resulting smi.
-  void Integer32ToSmi(Register dst, Register src);
-
-  // Convert smi to 64-bit integer (sign extended if necessary).
-  void SmiToInteger64(Register dst, Register src);
+  // Tag an word-size value. The result must be known to be a valid smi value.
+  void SmiTag(Register dst, Register src);
 
   // Simple comparison of smis. Both sides must be known smis to use these,
   // otherwise use Cmp.
......
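A sketch of the behavioral difference the header comment change above describes, not part of the diff: under 32-bit Smis the old SmiToInteger32 used a logical shift (shrp), leaving the upper 32 bits of the destination zero, while the new SmiUntag uses an arithmetic shift (sarp), so negative values come back sign-extended across the full register. The C++ model below is illustrative only.

  #include <cstdint>

  // Old x64 behavior (shrp): logical shift, upper 32 bits of the result are 0.
  uint64_t SmiToInteger32_old(uint64_t smi) { return smi >> 32; }

  // New behavior (sarp): arithmetic shift, result is sign-extended to 64 bits.
  int64_t SmiUntag_new(uint64_t smi) { return static_cast<int64_t>(smi) >> 32; }

  int main() {
    // Smi encoding of -5 with kSmiShift == 32.
    const uint64_t smi = static_cast<uint64_t>(int64_t{-5}) << 32;
    const bool old_zero_extended =
        SmiToInteger32_old(smi) == 0x00000000FFFFFFFBull;
    const bool new_sign_extended = SmiUntag_new(smi) == -5;
    return (old_zero_extended && new_sign_extended) ? 0 : 1;
  }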
@@ -228,9 +228,7 @@ TEST(SmiCompare) {
   CHECK_EQ(0, result);
 }
 
-
-TEST(Integer32ToSmi) {
+TEST(SmiTag) {
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
   size_t allocated;
@@ -243,36 +241,36 @@ TEST(Integer32ToSmi) {
   Label exit;
 
   __ movq(rax, Immediate(1));  // Test number.
-  __ movl(rcx, Immediate(0));
-  __ Integer32ToSmi(rcx, rcx);
+  __ movq(rcx, Immediate(0));
+  __ SmiTag(rcx, rcx);
   __ Set(rdx, reinterpret_cast<intptr_t>(Smi::kZero));
   __ cmpq(rcx, rdx);
   __ j(not_equal, &exit);
 
   __ movq(rax, Immediate(2));  // Test number.
-  __ movl(rcx, Immediate(1024));
-  __ Integer32ToSmi(rcx, rcx);
+  __ movq(rcx, Immediate(1024));
+  __ SmiTag(rcx, rcx);
   __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
   __ cmpq(rcx, rdx);
   __ j(not_equal, &exit);
 
   __ movq(rax, Immediate(3));  // Test number.
-  __ movl(rcx, Immediate(-1));
-  __ Integer32ToSmi(rcx, rcx);
+  __ movq(rcx, Immediate(-1));
+  __ SmiTag(rcx, rcx);
   __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
   __ cmpq(rcx, rdx);
   __ j(not_equal, &exit);
 
   __ movq(rax, Immediate(4));  // Test number.
-  __ movl(rcx, Immediate(Smi::kMaxValue));
-  __ Integer32ToSmi(rcx, rcx);
+  __ movq(rcx, Immediate(Smi::kMaxValue));
+  __ SmiTag(rcx, rcx);
   __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
   __ cmpq(rcx, rdx);
   __ j(not_equal, &exit);
 
   __ movq(rax, Immediate(5));  // Test number.
-  __ movl(rcx, Immediate(Smi::kMinValue));
-  __ Integer32ToSmi(rcx, rcx);
+  __ movq(rcx, Immediate(Smi::kMinValue));
+  __ SmiTag(rcx, rcx);
   __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
   __ cmpq(rcx, rdx);
   __ j(not_equal, &exit);
@@ -280,36 +278,36 @@ TEST(Integer32ToSmi) {
   // Different target register.
 
   __ movq(rax, Immediate(6));  // Test number.
-  __ movl(rcx, Immediate(0));
-  __ Integer32ToSmi(r8, rcx);
+  __ movq(rcx, Immediate(0));
+  __ SmiTag(r8, rcx);
   __ Set(rdx, reinterpret_cast<intptr_t>(Smi::kZero));
   __ cmpq(r8, rdx);
   __ j(not_equal, &exit);
 
   __ movq(rax, Immediate(7));  // Test number.
-  __ movl(rcx, Immediate(1024));
-  __ Integer32ToSmi(r8, rcx);
+  __ movq(rcx, Immediate(1024));
+  __ SmiTag(r8, rcx);
   __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
   __ cmpq(r8, rdx);
   __ j(not_equal, &exit);
 
   __ movq(rax, Immediate(8));  // Test number.
-  __ movl(rcx, Immediate(-1));
-  __ Integer32ToSmi(r8, rcx);
+  __ movq(rcx, Immediate(-1));
+  __ SmiTag(r8, rcx);
   __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
   __ cmpq(r8, rdx);
   __ j(not_equal, &exit);
 
   __ movq(rax, Immediate(9));  // Test number.
-  __ movl(rcx, Immediate(Smi::kMaxValue));
-  __ Integer32ToSmi(r8, rcx);
+  __ movq(rcx, Immediate(Smi::kMaxValue));
+  __ SmiTag(r8, rcx);
   __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
   __ cmpq(r8, rdx);
   __ j(not_equal, &exit);
 
   __ movq(rax, Immediate(10));  // Test number.
-  __ movl(rcx, Immediate(Smi::kMinValue));
-  __ Integer32ToSmi(r8, rcx);
+  __ movq(rcx, Immediate(Smi::kMinValue));
+  __ SmiTag(r8, rcx);
   __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
   __ cmpq(r8, rdx);
   __ j(not_equal, &exit);
@@ -347,7 +345,7 @@ TEST(SmiCheck) {
   // CheckSmi
 
   __ movl(rcx, Immediate(0));
-  __ Integer32ToSmi(rcx, rcx);
+  __ SmiTag(rcx, rcx);
   cond = masm->CheckSmi(rcx);
   __ j(NegateCondition(cond), &exit);
@@ -358,7 +356,7 @@ TEST(SmiCheck) {
   __ incq(rax);
   __ movl(rcx, Immediate(-1));
-  __ Integer32ToSmi(rcx, rcx);
+  __ SmiTag(rcx, rcx);
   cond = masm->CheckSmi(rcx);
   __ j(NegateCondition(cond), &exit);
@@ -369,7 +367,7 @@ TEST(SmiCheck) {
   __ incq(rax);
   __ movl(rcx, Immediate(Smi::kMaxValue));
-  __ Integer32ToSmi(rcx, rcx);
+  __ SmiTag(rcx, rcx);
   cond = masm->CheckSmi(rcx);
   __ j(NegateCondition(cond), &exit);
@@ -380,7 +378,7 @@ TEST(SmiCheck) {
   __ incq(rax);
   __ movl(rcx, Immediate(Smi::kMinValue));
-  __ Integer32ToSmi(rcx, rcx);
+  __ SmiTag(rcx, rcx);
   cond = masm->CheckSmi(rcx);
   __ j(NegateCondition(cond), &exit);
......