Commit 0243ba80 authored by Igor Sheludko's avatar Igor Sheludko Committed by Commit Bot

[cleanup][x32] Remove x32 leftovers from x64 assembler, pt.1

addp, andp, cmpp, decp, incp, leap, negp, orp, subp, testp, xorp,
shrp, sarp, shlp are replaced with respective quad-word instructions.

Some wrongly-used xxxp instructions in regexp code are replaced with xxxl.

Bug: v8:8621, v8:8562
Change-Id: If5fe3229a35805b8ef84d3e1ffa05cf9ed91ceef
Reviewed-on: https://chromium-review.googlesource.com/c/1446451
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59217}
parent d928d25c
This diff is collapsed.
...@@ -201,7 +201,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode { ...@@ -201,7 +201,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
zone_(gen->zone()) {} zone_(gen->zone()) {}
void Generate() final { void Generate() final {
__ subp(rsp, Immediate(kDoubleSize)); __ subq(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(), unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize); kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_); __ Movsd(MemOperand(rsp, 0), input_);
...@@ -214,7 +214,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode { ...@@ -214,7 +214,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
__ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET); __ Call(BUILTIN_CODE(isolate_, DoubleToI), RelocInfo::CODE_TARGET);
} }
__ movl(result_, MemOperand(rsp, 0)); __ movl(result_, MemOperand(rsp, 0));
__ addp(rsp, Immediate(kDoubleSize)); __ addq(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(), unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kDoubleSize); -kDoubleSize);
} }
...@@ -250,7 +250,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -250,7 +250,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CheckPageFlag(value_, scratch0_, __ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, zero, MemoryChunk::kPointersToHereAreInterestingMask, zero,
exit()); exit());
__ leap(scratch1_, operand_); __ leaq(scratch1_, operand_);
RememberedSetAction const remembered_set_action = RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
...@@ -592,7 +592,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg, ...@@ -592,7 +592,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
Label done; Label done;
// Check if current frame is an arguments adaptor frame. // Check if current frame is an arguments adaptor frame.
__ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset), __ cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &done, Label::kNear); __ j(not_equal, &done, Label::kNear);
...@@ -708,7 +708,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { ...@@ -708,7 +708,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// bits cleared if we are speculatively executing the wrong PC. // bits cleared if we are speculatively executing the wrong PC.
__ ComputeCodeStartAddress(rbx); __ ComputeCodeStartAddress(rbx);
__ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister); __ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
__ cmpp(kJavaScriptCallCodeStartRegister, rbx); __ cmpq(kJavaScriptCallCodeStartRegister, rbx);
__ movp(rbx, Immediate(-1)); __ movp(rbx, Immediate(-1));
__ cmovq(equal, kSpeculationPoisonRegister, rbx); __ cmovq(equal, kSpeculationPoisonRegister, rbx);
} }
...@@ -3741,7 +3741,7 @@ void CodeGenerator::AssembleConstructFrame() { ...@@ -3741,7 +3741,7 @@ void CodeGenerator::AssembleConstructFrame() {
const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp); const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
const int stack_size = saves_fp_count * kQuadWordSize; const int stack_size = saves_fp_count * kQuadWordSize;
// Adjust the stack pointer. // Adjust the stack pointer.
__ subp(rsp, Immediate(stack_size)); __ subq(rsp, Immediate(stack_size));
// Store the registers on the stack. // Store the registers on the stack.
int slot_idx = 0; int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) { for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
...@@ -3793,7 +3793,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { ...@@ -3793,7 +3793,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
slot_idx++; slot_idx++;
} }
// Adjust the stack pointer. // Adjust the stack pointer.
__ addp(rsp, Immediate(stack_size)); __ addq(rsp, Immediate(stack_size));
} }
unwinding_info_writer_.MarkBlockWillExit(); unwinding_info_writer_.MarkBlockWillExit();
......
...@@ -112,11 +112,11 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) { ...@@ -112,11 +112,11 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->pushq(reg.gp()); assm->pushq(reg.gp());
break; break;
case kWasmF32: case kWasmF32:
assm->subp(rsp, Immediate(kSystemPointerSize)); assm->subq(rsp, Immediate(kSystemPointerSize));
assm->Movss(Operand(rsp, 0), reg.fp()); assm->Movss(Operand(rsp, 0), reg.fp());
break; break;
case kWasmF64: case kWasmF64:
assm->subp(rsp, Immediate(kSystemPointerSize)); assm->subq(rsp, Immediate(kSystemPointerSize));
assm->Movsd(Operand(rsp, 0), reg.fp()); assm->Movsd(Operand(rsp, 0), reg.fp());
break; break;
default: default:
...@@ -698,9 +698,9 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { ...@@ -698,9 +698,9 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) { LiftoffRegister rhs) {
if (lhs.gp() != dst.gp()) { if (lhs.gp() != dst.gp()) {
leap(dst.gp(), Operand(lhs.gp(), rhs.gp(), times_1, 0)); leaq(dst.gp(), Operand(lhs.gp(), rhs.gp(), times_1, 0));
} else { } else {
addp(dst.gp(), rhs.gp()); addq(dst.gp(), rhs.gp());
} }
} }
...@@ -1412,7 +1412,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, ...@@ -1412,7 +1412,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
} }
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
cmpp(rsp, Operand(limit_address, 0)); cmpq(rsp, Operand(limit_address, 0));
j(below_equal, ool_code); j(below_equal, ool_code);
} }
...@@ -1435,7 +1435,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { ...@@ -1435,7 +1435,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList fp_regs = regs & kFpCacheRegList; LiftoffRegList fp_regs = regs & kFpCacheRegList;
unsigned num_fp_regs = fp_regs.GetNumRegsSet(); unsigned num_fp_regs = fp_regs.GetNumRegsSet();
if (num_fp_regs) { if (num_fp_regs) {
subp(rsp, Immediate(num_fp_regs * kStackSlotSize)); subq(rsp, Immediate(num_fp_regs * kStackSlotSize));
unsigned offset = 0; unsigned offset = 0;
while (!fp_regs.is_empty()) { while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet(); LiftoffRegister reg = fp_regs.GetFirstRegSet();
...@@ -1456,7 +1456,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { ...@@ -1456,7 +1456,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
fp_regs.clear(reg); fp_regs.clear(reg);
fp_offset += sizeof(double); fp_offset += sizeof(double);
} }
if (fp_offset) addp(rsp, Immediate(fp_offset)); if (fp_offset) addq(rsp, Immediate(fp_offset));
LiftoffRegList gp_regs = regs & kGpCacheRegList; LiftoffRegList gp_regs = regs & kGpCacheRegList;
while (!gp_regs.is_empty()) { while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetLastRegSet(); LiftoffRegister reg = gp_regs.GetLastRegSet();
...@@ -1476,7 +1476,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig, ...@@ -1476,7 +1476,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets, const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes, ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) { ExternalReference ext_ref) {
subp(rsp, Immediate(stack_bytes)); subq(rsp, Immediate(stack_bytes));
int arg_bytes = 0; int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) { for (ValueType param_type : sig->parameters()) {
...@@ -1510,7 +1510,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig, ...@@ -1510,7 +1510,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_type); liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_type);
} }
addp(rsp, Immediate(stack_bytes)); addq(rsp, Immediate(stack_bytes));
} }
void LiftoffAssembler::CallNativeWasmCode(Address addr) { void LiftoffAssembler::CallNativeWasmCode(Address addr) {
...@@ -1538,12 +1538,12 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { ...@@ -1538,12 +1538,12 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
} }
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
subp(rsp, Immediate(size)); subq(rsp, Immediate(size));
movp(addr, rsp); movp(addr, rsp);
} }
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addp(rsp, Immediate(size)); addq(rsp, Immediate(size));
} }
void LiftoffStackSlots::Construct() { void LiftoffStackSlots::Construct() {
......
...@@ -642,10 +642,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { ...@@ -642,10 +642,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void mulq(Register src); void mulq(Register src);
#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \ #define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
void instruction##p(Register dst, Immediate imm8) { \
shift(dst, imm8, subcode, kSystemPointerSize); \
} \
\
void instruction##l(Register dst, Immediate imm8) { \ void instruction##l(Register dst, Immediate imm8) { \
shift(dst, imm8, subcode, kInt32Size); \ shift(dst, imm8, subcode, kInt32Size); \
} \ } \
...@@ -654,10 +650,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { ...@@ -654,10 +650,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
shift(dst, imm8, subcode, kInt64Size); \ shift(dst, imm8, subcode, kInt64Size); \
} \ } \
\ \
void instruction##p(Operand dst, Immediate imm8) { \
shift(dst, imm8, subcode, kSystemPointerSize); \
} \
\
void instruction##l(Operand dst, Immediate imm8) { \ void instruction##l(Operand dst, Immediate imm8) { \
shift(dst, imm8, subcode, kInt32Size); \ shift(dst, imm8, subcode, kInt32Size); \
} \ } \
...@@ -666,18 +658,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { ...@@ -666,18 +658,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
shift(dst, imm8, subcode, kInt64Size); \ shift(dst, imm8, subcode, kInt64Size); \
} \ } \
\ \
void instruction##p_cl(Register dst) { \
shift(dst, subcode, kSystemPointerSize); \
} \
\
void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \ void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); } \
\ \
void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \ void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); } \
\ \
void instruction##p_cl(Operand dst) { \
shift(dst, subcode, kSystemPointerSize); \
} \
\
void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \ void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); } \
\ \
void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); } void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
......
...@@ -24,7 +24,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, ...@@ -24,7 +24,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
const int kNumberOfRegisters = Register::kNumRegisters; const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters; const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
__ subp(rsp, Immediate(kDoubleRegsSize)); __ subq(rsp, Immediate(kDoubleRegsSize));
const RegisterConfiguration* config = RegisterConfiguration::Default(); const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
...@@ -35,7 +35,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, ...@@ -35,7 +35,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
} }
const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters; const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
__ subp(rsp, Immediate(kFloatRegsSize)); __ subq(rsp, Immediate(kFloatRegsSize));
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) { for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
int code = config->GetAllocatableFloatCode(i); int code = config->GetAllocatableFloatCode(i);
...@@ -69,10 +69,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, ...@@ -69,10 +69,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Get the address of the location in the code object // Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5. // and compute the fp-to-sp delta in register arg5.
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize)); __ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
__ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize)); __ leaq(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
__ subp(arg5, rbp); __ subq(arg5, rbp);
__ negp(arg5); __ negq(arg5);
// Allocate a new deoptimizer object. // Allocate a new deoptimizer object.
__ PrepareCallCFunction(6); __ PrepareCallCFunction(6);
...@@ -119,7 +119,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, ...@@ -119,7 +119,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movl(rcx, Operand(rsp, src_offset)); __ movl(rcx, Operand(rsp, src_offset));
__ movl(Operand(rbx, dst_offset), rcx); __ movl(Operand(rbx, dst_offset), rcx);
} }
__ addp(rsp, Immediate(kFloatRegsSize)); __ addq(rsp, Immediate(kFloatRegsSize));
// Fill in the double input registers. // Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset(); int double_regs_offset = FrameDescription::double_registers_offset();
...@@ -129,25 +129,25 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, ...@@ -129,25 +129,25 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
} }
// Remove the return address from the stack. // Remove the return address from the stack.
__ addp(rsp, Immediate(kPCOnStackSize)); __ addq(rsp, Immediate(kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is // Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame. // the first stack slot not part of the input frame.
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset())); __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ addp(rcx, rsp); __ addq(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding // Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input // limit and copy the contents of the activation frame to the input
// frame description. // frame description.
__ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset())); __ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header; Label pop_loop_header;
__ jmp(&pop_loop_header); __ jmp(&pop_loop_header);
Label pop_loop; Label pop_loop;
__ bind(&pop_loop); __ bind(&pop_loop);
__ Pop(Operand(rdx, 0)); __ Pop(Operand(rdx, 0));
__ addp(rdx, Immediate(sizeof(intptr_t))); __ addq(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header); __ bind(&pop_loop_header);
__ cmpp(rcx, rsp); __ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop); __ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer. // Compute the output frame in the deoptimizer.
...@@ -170,7 +170,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, ...@@ -170,7 +170,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// last FrameDescription**. // last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset())); __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movp(rax, Operand(rax, Deoptimizer::output_offset())); __ movp(rax, Operand(rax, Deoptimizer::output_offset()));
__ leap(rdx, Operand(rax, rdx, times_pointer_size, 0)); __ leaq(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header); __ jmp(&outer_loop_header);
__ bind(&outer_push_loop); __ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index. // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
...@@ -178,14 +178,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, ...@@ -178,14 +178,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset())); __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header); __ jmp(&inner_loop_header);
__ bind(&inner_push_loop); __ bind(&inner_push_loop);
__ subp(rcx, Immediate(sizeof(intptr_t))); __ subq(rcx, Immediate(sizeof(intptr_t)));
__ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset())); __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header); __ bind(&inner_loop_header);
__ testp(rcx, rcx); __ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop); __ j(not_zero, &inner_push_loop);
__ addp(rax, Immediate(kSystemPointerSize)); __ addq(rax, Immediate(kSystemPointerSize));
__ bind(&outer_loop_header); __ bind(&outer_loop_header);
__ cmpp(rax, rdx); __ cmpq(rax, rdx);
__ j(below, &outer_push_loop); __ j(below, &outer_push_loop);
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
......
This diff is collapsed.
...@@ -770,9 +770,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { ...@@ -770,9 +770,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
static const int shift = Field::kShift; static const int shift = Field::kShift;
static const int mask = Field::kMask >> Field::kShift; static const int mask = Field::kMask >> Field::kShift;
if (shift != 0) { if (shift != 0) {
shrp(reg, Immediate(shift)); shrq(reg, Immediate(shift));
} }
andp(reg, Immediate(mask)); andq(reg, Immediate(mask));
} }
// Abort execution if argument is a smi, enabled via --debug-code. // Abort execution if argument is a smi, enabled via --debug-code.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment