Commit 7a45af14 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Unify SmiTag/SmiUntag operations in hand-written assembly code.

The result of SmiUntag is a sign-extended word-size value.
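For readers unfamiliar with the encoding, here is a minimal stand-alone C++ sketch (not part of this CL; kSmiShift == 32 is assumed and the function names merely mirror the macro-assembler helpers) of what the unified operations compute on a 64-bit target:

#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;  // assumed: smi payload lives in the upper half-word

int64_t SmiTag(int64_t value) {
  // Shift the payload into the upper 32 bits; the low bits hold the tag (0).
  // Shift through uint64_t to avoid signed-overflow UB in portable C++.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShift);
}

int64_t SmiUntag(int64_t smi) {
  // Arithmetic right shift (implementation-defined pre-C++20, but arithmetic
  // on mainstream compilers): yields a sign-extended word-size value.
  return smi >> kSmiShift;
}

int main() {
  assert(SmiUntag(SmiTag(-1)) == -1);      // sign extension is preserved
  assert(SmiUntag(SmiTag(1024)) == 1024);
  return 0;
}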

Bug: v8:7703
Change-Id: I85dc87b541cab78286e47e2147c16c6a0939f509
Reviewed-on: https://chromium-review.googlesource.com/1073232
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53389}
parent 7032b91d
@@ -24,17 +24,6 @@ MemOperand FieldMemOperand(Register object, int offset) {
}
MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
return UntagSmiMemOperand(object, offset - kHeapObjectTag);
}
MemOperand UntagSmiMemOperand(Register object, int offset) {
// Assumes that Smis are shifted by 32 bits and little endianness.
STATIC_ASSERT(kSmiShift == 32);
return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
}
void TurboAssembler::And(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
@@ -1063,24 +1052,28 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
Asr(dst, src, kSmiShift);
}
void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts) {
AssertSmi(src);
}
Scvtf(dst, src, kSmiShift);
}
void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src) {
DCHECK(dst.Is32Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts) {
AssertSmi(src);
}
Scvtf(dst, src, kSmiShift);
}
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
STATIC_ASSERT(kXRegSizeInBits ==
static_cast<unsigned>(kSmiShift + kSmiValueSize));
DCHECK(dst.Is64Bits());
if (src.IsImmediateOffset() && src.shift_amount() == 0) {
if (FLAG_enable_slow_asserts) {
Ldr(dst, src);
AssertSmi(dst);
}
// Load value directly from the upper half-word.
// Assumes that Smis are shifted by 32 bits and little endianness.
DCHECK_EQ(kSmiShift, 32);
Ldrsw(dst, MemOperand(src.base(), src.offset() + (kSmiShift / kBitsPerByte),
src.addrmode()));
} else {
Ldr(dst, src);
SmiUntag(dst);
}
}
void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label) {
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
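A note on the arm64 fast path above: with kSmiShift == 32 on a little-endian target, the untagged value is exactly the upper 32-bit half of the tagged word, so a single 4-byte load at offset + 4 untags for free; this is what the deleted UntagSmiMemOperand encoded and what the Ldrsw in the new SmiUntag(Register, MemOperand) still does. A hedged C++ sketch of the equivalence (illustrative names, not V8 code):

#include <cassert>
#include <cstdint>
#include <cstring>

int32_t LoadUpperHalfWord(const int64_t* slot) {
  // On little-endian, bytes 4..7 of the 64-bit slot hold the smi payload.
  int32_t value;
  std::memcpy(&value, reinterpret_cast<const char*>(slot) + 4, sizeof(value));
  return value;
}

int main() {
  // Tag -7 (shift through uint64_t to stay well-defined in portable C++).
  const int64_t tagged = static_cast<int64_t>(static_cast<uint64_t>(-7) << 32);
  assert(LoadUpperHalfWord(&tagged) == -7);  // the load alone untags
  return 0;
}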
@@ -97,11 +97,6 @@ namespace internal {
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);
// ----------------------------------------------------------------------------
// MacroAssembler
@@ -587,6 +582,7 @@ class TurboAssembler : public Assembler {
Register scratch1);
inline void SmiUntag(Register dst, Register src);
inline void SmiUntag(Register dst, const MemOperand& src);
inline void SmiUntag(Register smi);
// Calls Abort(msg) if the condition cond is not satisfied.
@@ -1716,8 +1712,6 @@ class MacroAssembler : public TurboAssembler {
inline void SmiTag(Register dst, Register src);
inline void SmiTag(Register smi);
inline void SmiUntagToDouble(VRegister dst, Register src);
inline void SmiUntagToFloat(VRegister dst, Register src);
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
inline void JumpIfBothSmi(Register value1, Register value2,
@@ -266,8 +266,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Restore smi-tagged arguments count from the frame. Use fp relative
// addressing to avoid the circular dependency between padding existence and
// argc parity.
__ Ldrsw(x1,
UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
@@ -351,8 +350,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore constructor function and argument count.
__ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ldrsw(x12,
UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Copy arguments to the expression stack. The called function pops the
// receiver along with its arguments, so we need an extra receiver on the
@@ -451,8 +449,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Bind(&leave_frame);
// Restore smi-tagged arguments count from the frame.
__ Ldrsw(x1,
UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
@@ -1124,7 +1121,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
__ Mov(kInterpreterBytecodeArrayRegister, x10);
__ Ldr(x10, UntagSmiFieldMemOperand(x11, DebugInfo::kFlagsOffset));
__ SmiUntag(x10, FieldMemOperand(x11, DebugInfo::kFlagsOffset));
__ And(x10, x10, Immediate(DebugInfo::kDebugExecutionMode));
STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
@@ -1659,7 +1656,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(x0, &failed);
// Peek the argument count from the stack, untagging at the same time.
__ Ldr(w4, UntagSmiMemOperand(sp, 3 * kPointerSize));
__ SmiUntag(x4, MemOperand(sp, 3 * kPointerSize));
__ Drop(4);
scope.GenerateLeaveFrame();
@@ -1803,8 +1800,8 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ Ldrsw(w1, UntagSmiFieldMemOperand(
x1, FixedArray::OffsetOfElementAt(
__ SmiUntag(x1,
FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
@@ -2340,9 +2337,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Bind(&arguments_adaptor);
{
// Just load the length from ArgumentsAdaptorFrame.
__ Ldrsw(len,
UntagSmiMemOperand(
args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(
len,
MemOperand(args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ Bind(&arguments_done);
}
@@ -2489,8 +2486,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Label no_bound_arguments;
__ Ldr(bound_argv,
FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
__ Ldrsw(bound_argc,
UntagSmiFieldMemOperand(bound_argv, FixedArray::kLengthOffset));
__ SmiUntag(bound_argc,
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
__ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
@@ -3620,7 +3617,7 @@ void Builtins::Generate_ArrayConstructorImpl(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
__ Ldrsw(kind, UntagSmiFieldMemOperand(
__ SmiUntag(kind, FieldMemOperand(
allocation_site,
AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
@@ -1230,9 +1230,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
__ Lw(
kInterpreterBytecodeOffsetRegister,
UntagSmiMemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
@@ -1305,9 +1304,7 @@ static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
// IsSmi: Is builtin
__ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
__ li(scratch1, ExternalReference::builtins_address(masm->isolate()));
// Avoid untagging the Smi by merging the shift
STATIC_ASSERT(kPointerSizeLog2 < kSmiShift);
__ dsrl(sfi_data, sfi_data, kSmiShift - kPointerSizeLog2);
__ SmiScale(sfi_data, sfi_data, kPointerSizeLog2);
__ Daddu(scratch1, scratch1, sfi_data);
__ Ld(sfi_data, MemOperand(scratch1));
__ Branch(&done);
@@ -1638,7 +1635,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ Lw(a1, UntagSmiMemOperand(a1, FixedArray::OffsetOfElementAt(
__ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex) -
kHeapObjectTag));
@@ -1887,8 +1884,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// __ sll(a0, a0, kSmiTagSize);
__ dsll32(a0, a0, 0);
__ SmiTag(a0);
__ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
__ Push(Smi::kZero); // Padding.
@@ -2015,8 +2011,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&arguments_adaptor);
{
// Just get the length from the ArgumentsAdaptorFrame.
__ Lw(a7, UntagSmiMemOperand(
a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(a7,
MemOperand(a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
@@ -2174,7 +2170,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Load [[BoundArguments]] into a2 and length of that into a4.
__ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
__ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
__ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2221,7 +2217,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop, done_loop;
__ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
__ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
@@ -2327,7 +2323,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Load [[BoundArguments]] into a2 and length of that into a4.
__ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
__ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
__ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2375,7 +2371,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop, done_loop;
__ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
__ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
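On the SmiScale change in GetSharedFunctionInfoCode above: untag-then-scale folds into a single right shift whenever kSmiShift > kPointerSizeLog2, which is exactly the dsrl the removed hand-coded sequence used. A small C++ sketch of the identity (constants assumed; the helper name is illustrative, not V8 code):

#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;        // assumed smi shift on a 64-bit port
constexpr int kPointerSizeLog2 = 3;  // 8-byte pointers

uint64_t SmiScale(uint64_t tagged_index) {
  // One shift does the work of untag (>> kSmiShift) followed by
  // scale-to-byte-offset (<< kPointerSizeLog2), for non-negative indices.
  return tagged_index >> (kSmiShift - kPointerSizeLog2);
}

int main() {
  const uint64_t tagged = uint64_t{5} << kSmiShift;               // smi-encoded index 5
  assert(SmiScale(tagged) == (uint64_t{5} << kPointerSizeLog2));  // 40-byte offset
  return 0;
}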
@@ -65,10 +65,10 @@ void AdaptorWithExitFrameType(MacroAssembler* masm,
// Unconditionally insert argc, target and new target as extra arguments. They
// will be used by stack frame iterators when constructing the stack trace.
__ PopReturnAddressTo(kScratchRegister);
__ Integer32ToSmi(rax, rax);
__ SmiTag(rax, rax);
__ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
__ Push(rax);
__ SmiToInteger32(rax, rax);
__ SmiUntag(rax, rax);
__ Push(rdi);
__ Push(rdx);
__ PushReturnAddressFrom(kScratchRegister);
@@ -100,7 +100,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the number of arguments to the callee.
__ Integer32ToSmi(rax, rax);
__ SmiTag(rax, rax);
__ Push(rax);
// Push a copy of the target function and the new target.
__ Push(rdi);
@@ -115,7 +115,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(rdx);
__ Pop(rdi);
__ Pop(rax);
__ SmiToInteger32(rax, rax);
__ SmiUntag(rax, rax);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
@@ -137,7 +137,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
__ Integer32ToSmi(rcx, rax);
__ SmiTag(rcx, rax);
__ Push(rsi);
__ Push(rcx);
@@ -208,7 +208,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Label post_instantiation_deopt_entry, not_create_implicit_receiver;
// Preserve the incoming parameters on the stack.
__ Integer32ToSmi(rcx, rax);
__ SmiTag(rcx, rax);
__ Push(rsi);
__ Push(rcx);
__ Push(rdi);
@@ -271,8 +271,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore constructor function and argument count.
__ movp(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
__ SmiToInteger32(rax,
Operand(rbp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
// Set up pointer to last argument.
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
@@ -952,7 +951,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Push bytecode array and Smi tagged bytecode offset.
__ Push(kInterpreterBytecodeArrayRegister);
__ Integer32ToSmi(rcx, kInterpreterBytecodeOffsetRegister);
__ SmiTag(rcx, kInterpreterBytecodeOffsetRegister);
__ Push(rcx);
// Allocate the local and temporary register file on the stack.
@@ -1022,7 +1021,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ movp(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Either return, or advance to the next bytecode and dispatch.
@@ -1051,7 +1050,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&bytecode_array_loaded);
__ movp(kInterpreterBytecodeArrayRegister, kScratchRegister);
__ SmiToInteger32(rax, FieldOperand(rcx, DebugInfo::kFlagsOffset));
__ SmiUntag(rax, FieldOperand(rcx, DebugInfo::kFlagsOffset));
__ andb(rax, Immediate(DebugInfo::kDebugExecutionMode));
STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
static_cast<int>(DebugInfo::kSideEffects));
@@ -1265,7 +1264,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Get the target bytecode offset from the frame.
__ movp(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
@@ -1283,7 +1282,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ movp(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Load the current bytecode.
@@ -1297,7 +1296,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
&if_return);
// Convert new bytecode offset to a Smi and save it in the stack frame.
__ Integer32ToSmi(rbx, kInterpreterBytecodeOffsetRegister);
__ SmiTag(rbx, kInterpreterBytecodeOffsetRegister);
__ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
Generate_InterpreterEnterBytecode(masm);
@@ -1479,7 +1478,7 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// Load the code object at builtins_table[builtin_id] into scratch1.
__ SmiToInteger32(scratch1, scratch1);
__ SmiUntag(scratch1, scratch1);
__ Move(scratch0, ExternalReference::builtins_address(masm->isolate()));
__ movp(scratch1, Operand(scratch0, scratch1, times_pointer_size, 0));
@@ -1524,7 +1523,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// Preserve argument count for later compare.
__ movp(rcx, rax);
// Push the number of arguments to the callee.
__ Integer32ToSmi(rax, rax);
__ SmiTag(rax, rax);
__ Push(rax);
// Push a copy of the target function and the new target.
__ Push(rdi);
@@ -1561,7 +1560,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Drop(2);
__ Pop(rcx);
__ SmiToInteger32(rcx, rcx);
__ SmiUntag(rcx, rcx);
scope.GenerateLeaveFrame();
__ PopReturnAddressTo(rbx);
@@ -1575,7 +1574,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Pop(rdx);
__ Pop(rdi);
__ Pop(rax);
__ SmiToInteger32(rax, rax);
__ SmiUntag(rax, rax);
}
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
@@ -1602,7 +1601,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
int code = config->GetAllocatableGeneralCode(i);
__ popq(Register::from_code(code));
if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
__ SmiToInteger32(Register::from_code(code), Register::from_code(code));
__ SmiUntag(Register::from_code(code), Register::from_code(code));
}
}
__ movq(
@@ -1948,7 +1947,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Preserve the number of arguments on the stack. Must preserve rax,
// rbx and rcx because these registers are used when copying the
// arguments and the receiver.
__ Integer32ToSmi(r8, rax);
__ SmiTag(r8, rax);
__ Push(r8);
__ Push(Immediate(0)); // Padding.
@@ -1975,7 +1974,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -- rdx : requested object size (untagged)
// -- rsp[0] : return address
// -----------------------------------
__ Integer32ToSmi(rdx, rdx);
__ SmiTag(rdx, rdx);
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
@@ -1989,7 +1988,7 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// -- rdx : requested object size (untagged)
// -- rsp[0] : return address
// -----------------------------------
__ Integer32ToSmi(rdx, rdx);
__ SmiTag(rdx, rdx);
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
@@ -2221,8 +2220,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ jmp(&arguments_done, Label::kNear);
__ bind(&arguments_adaptor);
{
__ SmiToInteger32(
r8, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(r8,
Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
@@ -2328,7 +2327,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// TODO(bmeurer): Inline the allocation here to avoid building the frame
// in the fast case? (fall back to AllocateInNewSpace?)
FrameScope scope(masm, StackFrame::INTERNAL);
__ Integer32ToSmi(rax, rax);
__ SmiTag(rax, rax);
__ Push(rax);
__ Push(rdi);
__ movp(rax, rcx);
@@ -2339,7 +2338,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movp(rcx, rax);
__ Pop(rdi);
__ Pop(rax);
__ SmiToInteger32(rax, rax);
__ SmiUntag(rax, rax);
}
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
@@ -2383,7 +2382,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into rcx and length of that into rbx.
Label no_bound_arguments;
__ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ SmiUntag(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ testl(rbx, rbx);
__ j(zero, &no_bound_arguments);
{
@@ -2435,7 +2434,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label loop;
__ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ SmiUntag(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ bind(&loop);
__ decl(rbx);
__ movp(kScratchRegister, FieldOperand(rcx, rbx, times_pointer_size,
@@ -2666,8 +2665,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
__ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiToInteger32(rbx,
Operand(rbx, FixedArray::OffsetOfElementAt(
__ SmiUntag(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex) -
kHeapObjectTag));
@@ -3252,7 +3250,7 @@ void Builtins::Generate_ArrayConstructorImpl(MacroAssembler* masm) {
// Only look at the lower 16 bits of the transition info.
__ movp(rdx, FieldOperand(
rbx, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ SmiToInteger32(rdx, rdx);
__ SmiUntag(rdx, rdx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
@@ -563,8 +563,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
// Load arguments count from current arguments adaptor frame (note, it
// does not include receiver).
Register caller_args_count_reg = scratch1;
__ SmiToInteger32(
caller_args_count_reg,
__ SmiUntag(caller_args_count_reg,
Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
ParameterCount callee_args_count(args_reg);
@@ -5346,9 +5346,9 @@ void MacroAssembler::AssertStackIsAligned() {
}
}
void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
if (SmiValuesAre32Bits()) {
Lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
} else {
Lw(dst, src);
SmiUntag(dst);
@@ -131,18 +131,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
}
inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
// Assumes that Smis are shifted by 32 bits.
STATIC_ASSERT(kSmiShift == 32);
return MemOperand(rm, SmiWordOffset(offset));
}
inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
}
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
// TODO(plind): Currently ONLY used for O32. Should be fixed for
@@ -508,6 +496,7 @@ class TurboAssembler : public Assembler {
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3
void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
if (SmiValuesAre32Bits()) {
STATIC_ASSERT(kSmiShift == 32);
@@ -1199,9 +1188,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
}
}
// Combine load with untagging or scaling.
void SmiLoadUntag(Register dst, MemOperand src);
// Test if the register contains a smi.
inline void SmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask));
@@ -1098,48 +1098,31 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) {
movp(dst, ext.address(), RelocInfo::EXTERNAL_REFERENCE);
}
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
void MacroAssembler::SmiTag(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (dst != src) {
movl(dst, src);
movp(dst, src);
}
shlp(dst, Immediate(kSmiShift));
}
void TurboAssembler::SmiToInteger32(Register dst, Register src) {
void TurboAssembler::SmiUntag(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (dst != src) {
movp(dst, src);
}
if (SmiValuesAre32Bits()) {
shrp(dst, Immediate(kSmiShift));
} else {
DCHECK(SmiValuesAre31Bits());
sarl(dst, Immediate(kSmiShift));
}
sarp(dst, Immediate(kSmiShift));
}
void TurboAssembler::SmiToInteger32(Register dst, Operand src) {
void TurboAssembler::SmiUntag(Register dst, Operand src) {
if (SmiValuesAre32Bits()) {
movl(dst, Operand(src, kSmiShift / kBitsPerByte));
// Sign extend to 64-bit.
movsxlq(dst, dst);
} else {
DCHECK(SmiValuesAre31Bits());
movl(dst, src);
sarl(dst, Immediate(kSmiShift));
}
}
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (dst != src) {
movp(dst, src);
}
sarp(dst, Immediate(kSmiShift));
if (kPointerSize == kInt32Size) {
// Sign extend to 64-bit.
movsxlq(dst, dst);
}
}
@@ -2385,13 +2368,13 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
Integer32ToSmi(expected.reg(), expected.reg());
SmiTag(expected.reg(), expected.reg());
Push(expected.reg());
}
if (actual.is_reg()) {
Integer32ToSmi(actual.reg(), actual.reg());
SmiTag(actual.reg(), actual.reg());
Push(actual.reg());
SmiToInteger64(actual.reg(), actual.reg());
SmiUntag(actual.reg(), actual.reg());
}
if (new_target.is_valid()) {
Push(new_target);
@@ -2406,11 +2389,11 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
}
if (actual.is_reg()) {
Pop(actual.reg());
SmiToInteger64(actual.reg(), actual.reg());
SmiUntag(actual.reg(), actual.reg());
}
if (expected.is_reg()) {
Pop(expected.reg());
SmiToInteger64(expected.reg(), expected.reg());
SmiUntag(expected.reg(), expected.reg());
}
}
bind(&skip_hook);
@@ -357,10 +357,9 @@ class TurboAssembler : public Assembler {
movp(dst, ptr, rmode);
}
// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
void SmiToInteger32(Register dst, Register src);
void SmiToInteger32(Register dst, Operand src);
// Convert smi to word-size sign-extended value.
void SmiUntag(Register dst, Register src);
void SmiUntag(Register dst, Operand src);
// Loads the address of the external reference into the destination
// register.
@@ -681,13 +680,8 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
// Only uses the low 32 bits of the src register. Sets the N and Z flags
// based on the value of the resulting smi.
void Integer32ToSmi(Register dst, Register src);
// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
// Tag a word-size value. The result must be known to be a valid smi value.
void SmiTag(Register dst, Register src);
// Simple comparison of smis. Both sides must be known smis to use these,
// otherwise use Cmp.
@@ -228,9 +228,7 @@ TEST(SmiCompare) {
CHECK_EQ(0, result);
}
TEST(Integer32ToSmi) {
TEST(SmiTag) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
@@ -243,36 +241,36 @@
Label exit;
__ movq(rax, Immediate(1)); // Test number.
__ movl(rcx, Immediate(0));
__ Integer32ToSmi(rcx, rcx);
__ movq(rcx, Immediate(0));
__ SmiTag(rcx, rcx);
__ Set(rdx, reinterpret_cast<intptr_t>(Smi::kZero));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(2)); // Test number.
__ movl(rcx, Immediate(1024));
__ Integer32ToSmi(rcx, rcx);
__ movq(rcx, Immediate(1024));
__ SmiTag(rcx, rcx);
__ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(3)); // Test number.
__ movl(rcx, Immediate(-1));
__ Integer32ToSmi(rcx, rcx);
__ movq(rcx, Immediate(-1));
__ SmiTag(rcx, rcx);
__ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(4)); // Test number.
__ movl(rcx, Immediate(Smi::kMaxValue));
__ Integer32ToSmi(rcx, rcx);
__ movq(rcx, Immediate(Smi::kMaxValue));
__ SmiTag(rcx, rcx);
__ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(5)); // Test number.
__ movl(rcx, Immediate(Smi::kMinValue));
__ Integer32ToSmi(rcx, rcx);
__ movq(rcx, Immediate(Smi::kMinValue));
__ SmiTag(rcx, rcx);
__ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
@@ -280,36 +278,36 @@
// Different target register.
__ movq(rax, Immediate(6)); // Test number.
__ movl(rcx, Immediate(0));
__ Integer32ToSmi(r8, rcx);
__ movq(rcx, Immediate(0));
__ SmiTag(r8, rcx);
__ Set(rdx, reinterpret_cast<intptr_t>(Smi::kZero));
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(7)); // Test number.
__ movl(rcx, Immediate(1024));
__ Integer32ToSmi(r8, rcx);
__ movq(rcx, Immediate(1024));
__ SmiTag(r8, rcx);
__ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(8)); // Test number.
__ movl(rcx, Immediate(-1));
__ Integer32ToSmi(r8, rcx);
__ movq(rcx, Immediate(-1));
__ SmiTag(r8, rcx);
__ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(9)); // Test number.
__ movl(rcx, Immediate(Smi::kMaxValue));
__ Integer32ToSmi(r8, rcx);
__ movq(rcx, Immediate(Smi::kMaxValue));
__ SmiTag(r8, rcx);
__ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(10)); // Test number.
__ movl(rcx, Immediate(Smi::kMinValue));
__ Integer32ToSmi(r8, rcx);
__ movq(rcx, Immediate(Smi::kMinValue));
__ SmiTag(r8, rcx);
__ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
@@ -347,7 +345,7 @@ TEST(SmiCheck) {
// CheckSmi
__ movl(rcx, Immediate(0));
__ Integer32ToSmi(rcx, rcx);
__ SmiTag(rcx, rcx);
cond = masm->CheckSmi(rcx);
__ j(NegateCondition(cond), &exit);
@@ -358,7 +356,7 @@
__ incq(rax);
__ movl(rcx, Immediate(-1));
__ Integer32ToSmi(rcx, rcx);
__ SmiTag(rcx, rcx);
cond = masm->CheckSmi(rcx);
__ j(NegateCondition(cond), &exit);
@@ -369,7 +367,7 @@
__ incq(rax);
__ movl(rcx, Immediate(Smi::kMaxValue));
__ Integer32ToSmi(rcx, rcx);
__ SmiTag(rcx, rcx);
cond = masm->CheckSmi(rcx);
__ j(NegateCondition(cond), &exit);
@@ -380,7 +378,7 @@
__ incq(rax);
__ movl(rcx, Immediate(Smi::kMinValue));
__ Integer32ToSmi(rcx, rcx);
__ SmiTag(rcx, rcx);
cond = masm->CheckSmi(rcx);
__ j(NegateCondition(cond), &exit);