Commit 5f598829 authored by jyan, committed by Commit bot

S390: Upstream changes from the past 2 weeks

Upstream S390 platform-specific code to the latest lkgr, covering changes from the past two weeks.

R=danno@chromium.org,jkummerow@chromium.org,jochen@chromium.org,joransiu@ca.ibm.com,michael_dawson@ca.ibm.com,mbrandy@us.ibm.com
BUG=

Review URL: https://codereview.chromium.org/1799893002

Cr-Commit-Position: refs/heads/master@{#34787}
parent 9a6069d1
......@@ -92,7 +92,7 @@ class S390OperandConverter final : public InstructionOperandConverter {
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
MemOperand SlotToMemOperand(int slot) {
MemOperand SlotToMemOperand(int slot) const {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
......@@ -580,6 +580,31 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
frame_access_state()->SetFrameAccessToSP();
}
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
Register scratch1,
Register scratch2,
Register scratch3) {
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Label done;
// Check if current frame is an arguments adaptor frame.
__ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ bne(&done);
// Load arguments count from current arguments adaptor frame (note, it
// does not include receiver).
Register caller_args_count_reg = scratch1;
__ LoadP(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(caller_args_count_reg);
ParameterCount callee_args_count(args_reg);
__ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
scratch3);
__ bind(&done);
}
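As a reading aid, here is a minimal C++ sketch of the runtime decision this helper emits, using a hypothetical frame struct, an illustrative marker id, and 32-bit Smi tagging (value << 1, tag bit 0); none of these constants are V8's real ones:

#include <cstdint>

// Hypothetical frame model: the slot that normally holds the context
// instead holds a Smi frame-type marker in an arguments adaptor frame.
struct Frame {
  Frame* caller_fp;
  intptr_t context_or_marker;  // context pointer, or Smi marker
  intptr_t length_smi;         // adaptor frames: argument count as a Smi
};

constexpr int kArgumentsAdaptor = 8;  // illustrative frame-type id
constexpr intptr_t SmiFromInt(int v) { return static_cast<intptr_t>(v) << 1; }
constexpr int SmiToInt(intptr_t smi) { return static_cast<int>(smi >> 1); }

// Mirrors the emitted check: if the current frame is an arguments
// adaptor, take the caller's argument count (receiver excluded) from
// the adaptor's length slot; otherwise fall back to the callee's count.
int CallerArgCount(const Frame* fp, int default_count) {
  if (fp->context_or_marker != SmiFromInt(kArgumentsAdaptor)) return default_count;
  return SmiToInt(fp->length_smi);
}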
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
S390OperandConverter i(this, instr);
......@@ -600,9 +625,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
if (opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
}
if (HasRegisterInput(instr, 0)) {
__ AddP(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
......@@ -633,6 +664,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
......@@ -644,6 +676,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
if (opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
}
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
frame_access_state()->ClearSPDelta();
......@@ -806,7 +843,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_BINOP(srlg, srlg);
break;
#endif
case kS390_ShiftRightAlg32:
case kS390_ShiftRightArith32:
if (HasRegisterInput(instr, 1)) {
if (i.OutputRegister().is(i.InputRegister(1))) {
__ LoadRR(kScratchReg, i.InputRegister(1));
......@@ -820,9 +857,44 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftRightAlg64:
case kS390_ShiftRightArith64:
ASSEMBLE_BINOP(srag, srag);
break;
#endif
#if !V8_TARGET_ARCH_S390X
case kS390_ShiftLeftPair:
if (instr->InputAt(2)->IsImmediate()) {
__ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
} else {
__ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
i.InputRegister(0), i.InputRegister(1), kScratchReg,
i.InputRegister(2));
}
break;
case kS390_ShiftRightPair:
if (instr->InputAt(2)->IsImmediate()) {
__ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
} else {
__ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
i.InputRegister(0), i.InputRegister(1), kScratchReg,
i.InputRegister(2));
}
break;
case kS390_ShiftRightArithPair:
if (instr->InputAt(2)->IsImmediate()) {
__ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
} else {
__ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
i.InputRegister(0), i.InputRegister(1),
kScratchReg, i.InputRegister(2));
}
break;
#endif
case kS390_RotRight32:
if (HasRegisterInput(instr, 1)) {
......@@ -858,7 +930,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
Operand(endBit), Operand::Zero(), true);
} else {
UNIMPLEMENTED();
int shiftAmount = i.InputInt32(1);
int clearBitLeft = 63 - i.InputInt32(2);
int clearBitRight = i.InputInt32(3);
__ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
__ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
__ srlg(i.OutputRegister(), i.OutputRegister(),
Operand((clearBitLeft + clearBitRight)));
__ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
}
break;
#if V8_TARGET_ARCH_S390X
......@@ -873,7 +952,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
Operand(endBit), Operand(shiftAmount), true);
} else {
UNIMPLEMENTED();
int shiftAmount = i.InputInt32(1);
int clearBit = 63 - i.InputInt32(2);
__ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
__ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
__ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
}
break;
case kS390_RotLeftAndClearRight64:
......@@ -884,7 +967,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
Operand(endBit), Operand(shiftAmount), true);
} else {
UNIMPLEMENTED();
int shiftAmount = i.InputInt32(1);
int clearBit = i.InputInt32(2);
__ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
__ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
__ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
}
break;
#endif
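Each UNIMPLEMENTED fallback replaced above emits a rotate followed by shift-based masking. A minimal sketch of the equivalent arithmetic in plain C++, for the general clear-left-and-clear-right form (the clear-left-only and clear-right-only cases simply drop one of the shifts); rotl64 stands in for rll/rllg, and clear_left + clear_right is assumed to stay below 64, as it does for valid bit ranges:

#include <cstdint>

// Stand-in for rll/rllg: rotate left with the count masked so the
// complementary shift never reaches the full word width (which would
// be undefined behavior in C++).
inline uint64_t rotl64(uint64_t v, unsigned n) {
  n &= 63;
  return (v << n) | (v >> ((64 - n) & 63));
}

// Rotate, then clear clear_left high bits and clear_right low bits
// using the same paired-shift trick as the emitted sllg/srlg sequence.
inline uint64_t RotateAndClear(uint64_t v, unsigned shift,
                               unsigned clear_left, unsigned clear_right) {
  uint64_t r = rotl64(v, shift);
  r <<= clear_left;                // sllg: drop the high bits
  r >>= clear_left + clear_right;  // srlg: drop the low bits too
  return r << clear_right;         // sllg: restore bit positions
}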
......@@ -1652,17 +1739,21 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->IsCFunctionCall()) {
__ Push(r14, fp);
__ LoadRR(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue(), ip);
} else if (frame()->needs_frame()) {
if (!ABI_CALL_VIA_IP && info()->output_code_kind() == Code::WASM_FUNCTION) {
// TODO(mbrandy): Restrict only to the wasm wrapper case.
__ StubPrologue();
if (frame()->needs_frame()) {
if (descriptor->IsCFunctionCall()) {
__ Push(r14, fp);
__ LoadRR(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue(), ip);
} else {
__ StubPrologue(ip);
StackFrame::Type type = info()->GetOutputStackFrameType();
if (!ABI_CALL_VIA_IP &&
info()->output_code_kind() == Code::WASM_FUNCTION) {
// TODO(mbrandy): Restrict only to the wasm wrapper case.
__ StubPrologue(type);
} else {
__ StubPrologue(type, ip);
}
}
} else {
frame()->SetElidedFrameSizeInSlots(0);
......
......@@ -19,10 +19,13 @@ namespace compiler {
V(S390_Xor) \
V(S390_ShiftLeft32) \
V(S390_ShiftLeft64) \
V(S390_ShiftLeftPair) \
V(S390_ShiftRight32) \
V(S390_ShiftRight64) \
V(S390_ShiftRightAlg32) \
V(S390_ShiftRightAlg64) \
V(S390_ShiftRightPair) \
V(S390_ShiftRightArith32) \
V(S390_ShiftRightArith64) \
V(S390_ShiftRightArithPair) \
V(S390_RotRight32) \
V(S390_RotRight64) \
V(S390_Not) \
......
......@@ -20,10 +20,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Xor:
case kS390_ShiftLeft32:
case kS390_ShiftLeft64:
case kS390_ShiftLeftPair:
case kS390_ShiftRight32:
case kS390_ShiftRight64:
case kS390_ShiftRightAlg32:
case kS390_ShiftRightAlg64:
case kS390_ShiftRightPair:
case kS390_ShiftRightArith32:
case kS390_ShiftRightArith64:
case kS390_ShiftRightArithPair:
case kS390_RotRight32:
case kS390_RotRight64:
case kS390_Not:
......
......@@ -750,12 +750,48 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
return;
}
}
VisitRRO(this, kS390_ShiftRightAlg32, node, kShift32Imm);
VisitRRO(this, kS390_ShiftRightArith32, node, kShift32Imm);
}
#if !V8_TARGET_ARCH_S390X
void VisitPairShift(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
S390OperandGenerator g(selector);
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
if (m.HasValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
}
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)),
shift_operand};
InstructionOperand outputs[] = {
g.DefineSameAsFirst(node),
g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
selector->Emit(opcode, 2, outputs, 3, inputs);
}
void InstructionSelector::VisitWord32PairShl(Node* node) {
VisitPairShift(this, kS390_ShiftLeftPair, node);
}
void InstructionSelector::VisitWord32PairShr(Node* node) {
VisitPairShift(this, kS390_ShiftRightPair, node);
}
void InstructionSelector::VisitWord32PairSar(Node* node) {
VisitPairShift(this, kS390_ShiftRightArithPair, node);
}
#endif
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Sar(Node* node) {
VisitRRO(this, kS390_ShiftRightAlg64, node, kShift64Imm);
VisitRRO(this, kS390_ShiftRightArith64, node, kShift64Imm);
}
#endif
......@@ -819,6 +855,10 @@ void InstructionSelector::VisitInt64Add(Node* node) {
}
#endif
#if !V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
#endif
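VisitInt32PairAdd is left UNIMPLEMENTED here. For context, the operation it must eventually lower is a 64-bit add expressed on (low, high) 32-bit halves; a minimal sketch of the semantics (not a proposed S390 lowering):

#include <cstdint>
#include <utility>

// 64-bit addition on 32-bit halves: add the low words, then propagate
// the carry into the high-word add. Unsigned wraparound is well defined.
inline std::pair<uint32_t, uint32_t> Int32PairAdd(uint32_t a_low, uint32_t a_high,
                                                  uint32_t b_low, uint32_t b_high) {
  uint32_t low = a_low + b_low;
  uint32_t carry = low < a_low ? 1u : 0u;  // did the low add wrap?
  return {low, a_high + b_high + carry};
}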
void InstructionSelector::VisitInt32Sub(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
......@@ -1627,6 +1667,8 @@ void InstructionSelector::EmitPrepareArguments(
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
S390OperandGenerator g(this);
Emit(kS390_DoubleExtractLowWord32, g.DefineAsRegister(node),
......
......@@ -193,11 +193,14 @@ class LCodeGen : public LCodeGenBase {
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
LInstruction* instr, LOperand* context);
void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
Register scratch2, Register scratch3);
// Generate a direct call to a known function. Expects the function
// to be in r4.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
LInstruction* instr);
bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
......
......@@ -839,14 +839,14 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
ToBooleanStub::Types expected = instr->expected_input_types();
if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
ToBooleanICStub::Types expected = instr->expected_input_types();
if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new (zone()) LBranch(UseRegister(value));
if (!easy_case &&
((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
!expected.IsGeneric())) {
branch = AssignEnvironment(branch);
}
......
......@@ -503,6 +503,7 @@ class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
DECLARE_HYDROGEN_ACCESSOR(ApplyArguments)
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
......
......@@ -125,8 +125,7 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// Load the function pointer off of our current stack frame.
__ LoadP(r3, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
kPointerSize));
__ LoadP(r3, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
// Pop return address and frame
__ LeaveFrame(StackFrame::INTERNAL);
......
......@@ -309,9 +309,7 @@ void FullCodeGenerator::Generate() {
// Visit the declarations and body unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ Declarations");
VisitForEffect(scope()->GetIllegalRedeclaration());
EmitIllegalRedeclaration();
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{
......@@ -511,7 +509,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
......@@ -602,7 +600,7 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
Label* if_false, Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
......@@ -1008,11 +1006,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register r2. Iterate through that.
__ bind(&fixed_array);
int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(r3);
__ mov(r4, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ StoreP(
r4, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)), r0);
__ LoadSmiLiteral(r3, Smi::FromInt(1)); // Smi(1) indicates slow check
__ Push(r3, r2); // Smi and array
__ LoadP(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
......@@ -1049,12 +1042,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CmpP(r6, r4);
__ beq(&update_each);
// We might get here from TurboFan or Crankshaft when something in the
// for-in loop body deopts and only now notice in fullcodegen that we
// can no longer use the enum cache, i.e. have left fast mode. So better record
// this information here, in case we later OSR back into this loop or
// reoptimize the whole function w/o rerunning the loop with the slow
// mode object in fullcodegen (which would result in a deopt loop).
// We need to filter the key, record slow-path here.
int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(r2);
__ mov(r4, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ StoreP(
......@@ -1770,64 +1759,44 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
PushOperand(result_register());
// Fall through.
case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ b(&suspend, Label::kNear);
__ bind(&continuation);
// When we arrive here, the stack top is the resume mode and
// result_register() holds the input value (the argument given to the
// respective resume operation).
__ RecordGeneratorContinuation();
__ pop(r3);
__ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::RETURN), r0);
__ bne(&resume);
__ push(result_register());
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
__ bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
__ LoadSmiLiteral(r3, Smi::FromInt(continuation.pos()));
__ StoreP(r3,
FieldMemOperand(r2, JSGeneratorObject::kContinuationOffset));
__ StoreP(cp, FieldMemOperand(r2, JSGeneratorObject::kContextOffset));
__ LoadRR(r3, cp);
__ RecordWriteField(r2, JSGeneratorObject::kContextOffset, r3, r4,
kLRHasBeenSaved, kDontSaveFPRegs);
__ AddP(r3, fp, Operand(StandardFrameConstants::kExpressionsOffset));
__ CmpP(sp, r3);
__ beq(&post_runtime);
__ push(r2); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
PopOperand(result_register());
EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
break;
}
case Yield::kFinal: {
// Pop value from top-of-stack slot, box result into result register.
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
break;
}
Label suspend, continuation, post_runtime, resume;
__ b(&suspend);
__ bind(&continuation);
// When we arrive here, the stack top is the resume mode and
// result_register() holds the input value (the argument given to the
// respective resume operation).
__ RecordGeneratorContinuation();
__ pop(r3);
__ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::RETURN), r0);
__ bne(&resume);
__ push(result_register());
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
__ bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
__ LoadSmiLiteral(r3, Smi::FromInt(continuation.pos()));
__ StoreP(r3, FieldMemOperand(r2, JSGeneratorObject::kContinuationOffset),
r0);
__ StoreP(cp, FieldMemOperand(r2, JSGeneratorObject::kContextOffset), r0);
__ LoadRR(r3, cp);
__ RecordWriteField(r2, JSGeneratorObject::kContextOffset, r3, r4,
kLRHasBeenSaved, kDontSaveFPRegs);
__ AddP(r3, fp, Operand(StandardFrameConstants::kExpressionsOffset));
__ CmpP(sp, r3);
__ beq(&post_runtime);
__ push(r2); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
PopOperand(result_register());
EmitReturnSequence();
case Yield::kDelegating:
UNREACHABLE();
}
__ bind(&resume);
context()->Plug(result_register());
}
void FullCodeGenerator::EmitGeneratorResume(
......@@ -1886,9 +1855,7 @@ void FullCodeGenerator::EmitGeneratorResume(
// fp = caller's frame pointer.
// cp = callee's context,
// r6 = callee's JS function.
__ PushFixedFrame(r6);
// Adjust FP to point to saved FP.
__ lay(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
__ PushStandardFrame(r6);
// Load the operand stack size.
__ LoadP(r5, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
......@@ -3884,10 +3851,11 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
__ CompareRoot(r2, nil_value);
Split(eq, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
__ CompareRoot(r2, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
__ JumpIfSmi(r2, if_false);
__ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadlB(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ AndP(r0, r3, Operand(1 << Map::kIsUndetectable));
Split(ne, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
......@@ -3963,12 +3931,6 @@ void FullCodeGenerator::ClearPendingMessage() {
__ StoreP(r3, MemOperand(ip));
}
void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
DCHECK(!slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
}
void FullCodeGenerator::DeferredCommands::EmitCommands() {
DCHECK(!result_register().is(r3));
// Restore the accumulator (r2) and token (r3).
......@@ -4027,7 +3989,6 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
break;
}
case ON_STACK_REPLACEMENT:
case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
// brc 0x0, <ok> ;; patched to NOP BRC
// brasrl r14, <interrupt stub address>
......@@ -4050,8 +4011,10 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Isolate* isolate, Code* unoptimized_code, Address pc) {
Address call_address = Assembler::target_address_from_return_address(pc);
Address branch_address = call_address - 4;
#ifdef DEBUG
Address interrupt_address =
Assembler::target_address_at(call_address, unoptimized_code);
#endif
DCHECK(BRC == Instruction::S390OpcodeValue(branch_address));
// For interrupt, we expect a branch greater than or equal
......@@ -4068,13 +4031,9 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
USE(kOSRBranchInstruction);
DCHECK(kOSRBranchInstruction == br_instr);
if (interrupt_address == isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
DCHECK(interrupt_address ==
isolate->builtins()->OsrAfterStackCheck()->entry());
return OSR_AFTER_STACK_CHECK;
isolate->builtins()->OnStackReplacement()->entry());
return ON_STACK_REPLACEMENT;
}
} // namespace internal
......
......@@ -28,6 +28,9 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save context register
__ push(cp);
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
......@@ -51,7 +54,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(cp);
}
__ Ret();
}
......@@ -65,8 +68,9 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save context register
// Save value register, so we can restore it later.
__ Push(value());
__ Push(cp, value());
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
......@@ -92,10 +96,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
}
// We have to return the passed value, not the return value of the setter.
__ Pop(r2);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Pop(cp, r2);
}
__ Ret();
}
......
......@@ -1804,7 +1804,7 @@ void Assembler::ay(Register r1, const MemOperand& opnd) {
// Add Immediate (32)
void Assembler::afi(Register r1, const Operand& opnd) {
ril_form(ALFI, r1, opnd);
ril_form(AFI, r1, opnd);
}
// Add Halfword Register-Storage (32)
......@@ -1920,7 +1920,7 @@ void Assembler::alg(Register r1, const MemOperand& opnd) {
// Add Logical Immediate (64)
void Assembler::algfi(Register r1, const Operand& opnd) {
ril_form(ALFI, r1, opnd);
ril_form(ALGFI, r1, opnd);
}
// Add Logical Register-Register (64)
......
......@@ -239,7 +239,7 @@ Register ToRegister(int num);
// Coprocessor register
struct CRegister {
bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
bool is_valid() const { return 0 <= reg_code && reg_code < 8; }
bool is(CRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
......@@ -264,14 +264,6 @@ const CRegister cr4 = {4};
const CRegister cr5 = {5};
const CRegister cr6 = {6};
const CRegister cr7 = {7};
const CRegister cr8 = {8};
const CRegister cr9 = {9};
const CRegister cr10 = {10};
const CRegister cr11 = {11};
const CRegister cr12 = {12};
const CRegister cr13 = {13};
const CRegister cr14 = {14};
const CRegister cr15 = {15};
// TODO(john.yan) Define SIMD registers.
typedef DoubleRegister Simd128Register;
......@@ -1253,7 +1245,9 @@ class Assembler : public AssemblerBase {
void dq(uint64_t data);
void dp(uintptr_t data);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
AssemblerPositionsRecorder* positions_recorder() {
return &positions_recorder_;
}
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
......@@ -1451,8 +1445,8 @@ class Assembler : public AssemblerBase {
List<Handle<Code> > code_targets_;
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
AssemblerPositionsRecorder positions_recorder_;
friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
......
......@@ -521,6 +521,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- r3 : constructor function
// -- r4 : allocation site or undefined
// -- r5 : new target
// -- cp : context
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
......@@ -537,11 +538,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (!create_implicit_receiver) {
__ SmiTag(r6, r2);
__ LoadAndTestP(r6, r6);
__ Push(r4, r6);
__ Push(cp, r4, r6);
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else {
__ SmiTag(r2);
__ Push(r4, r2);
__ Push(cp, r4, r2);
// Allocate the new receiver object.
__ Push(r3, r5);
......@@ -614,7 +615,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: result
// sp[0]: receiver
// sp[1]: number of arguments (smi-tagged)
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
......@@ -738,9 +739,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r0,r7-r9, cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
__ LoadImmP(cp, Operand::Zero());
// Enter an internal frame.
{
// FrameScope ends up calling MacroAssembler::EnterFrame here
......@@ -841,14 +839,24 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r3);
__ AddP(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ PushStandardFrame(r3);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
__ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
Label array_done;
Register debug_info = r4;
DCHECK(!debug_info.is(r2));
__ LoadP(debug_info,
FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
__ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
__ beq(&array_done);
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
__ bind(&array_done);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
......@@ -1178,8 +1186,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ LoadRR(ip, r2);
// Perform prologue operations usually performed by the young code stub.
__ PushFixedFrame(r3);
__ la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
__ PushStandardFrame(r3);
// Jump to point after the code-age stub.
__ AddP(r2, ip, Operand(kNoCodeAgeSequenceLength));
......@@ -1932,7 +1939,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
__ LoadP(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
__ LoadP(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
__ bne(&no_interpreter_frame);
__ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
......@@ -1940,80 +1948,40 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ LoadP(scratch3,
MemOperand(scratch2, StandardFrameConstants::kContextOffset));
__ LoadP(
scratch3,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ bne(&no_arguments_adaptor);
// Drop arguments adaptor frame and load arguments count.
// Drop current frame and load arguments count from arguments adaptor frame.
__ LoadRR(fp, scratch2);
__ LoadP(scratch1,
__ LoadP(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(scratch1);
__ SmiUntag(caller_args_count_reg);
__ b(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
// Load caller's formal parameter count
__ LoadP(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadP(scratch1,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
__ LoadP(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
__ LoadW(scratch1,
__ LoadW(caller_args_count_reg,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
#if !V8_TARGET_ARCH_S390X
__ SmiUntag(scratch1);
__ SmiUntag(caller_args_count_reg);
#endif
__ bind(&formal_parameter_count_loaded);
// Calculate the end of the destination area where we will put the arguments
// after we drop the current frame. We AddP kPointerSize to count the receiver
// argument, which is not included in the formal parameters count.
Register dst_reg = scratch2;
__ ShiftLeftP(dst_reg, scratch1, Operand(kPointerSizeLog2));
__ AddP(dst_reg, fp, dst_reg);
__ AddP(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Register src_reg = scratch1;
__ ShiftLeftP(src_reg, args_reg, Operand(kPointerSizeLog2));
__ AddP(src_reg, sp, src_reg);
// Count receiver argument as well (not included in args_reg).
__ AddP(src_reg, src_reg, Operand(kPointerSize));
if (FLAG_debug_code) {
__ CmpLogicalP(src_reg, dst_reg);
__ Check(lt, kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
__ RestoreFrameStateForTailCall();
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
// Both src_reg and dst_reg are pointing to the word after the one to copy,
// so they must be pre-decremented in the loop.
Register tmp_reg = scratch3;
Label loop;
DCHECK(!src_reg.is(r1));
DCHECK(!dst_reg.is(r1));
DCHECK(!tmp_reg.is(r1));
__ AddP(r1, args_reg, Operand(1)); // +1 for receiver
__ bind(&loop);
__ lay(src_reg, MemOperand(src_reg, -kPointerSize));
__ LoadP(tmp_reg, MemOperand(src_reg));
__ lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
__ StoreP(tmp_reg, MemOperand(dst_reg));
__ BranchOnCount(r1, &loop);
// Leave current frame.
__ LoadRR(sp, dst_reg);
ParameterCount callee_args_count(args_reg);
__ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
scratch3);
__ bind(&done);
}
} // namespace
......@@ -2261,6 +2229,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
__ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r6, Map::kIsCallable);
__ beq(&non_callable);
__ CmpP(r7, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
......@@ -2281,10 +2255,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
// Check if target has a [[Call]] internal method.
__ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r6, Map::kIsCallable, r0);
__ beq(&non_callable);
// Overwrite the original receiver with the (original) target.
__ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
__ StoreP(r3, MemOperand(sp, r7));
......
......@@ -477,7 +477,9 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
__ b(both_loaded_as_doubles);
}
// Fast negative check for internalized-to-internalized equality.
// Fast negative check for internalized-to-internalized equality or receiver
// equality. Also handles the undetectable receiver to null/undefined
// comparison.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register lhs, Register rhs,
Label* possible_strings,
......@@ -485,7 +487,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2)));
// r4 is object type of rhs.
Label object_test, return_unequal, undetectable;
Label object_test, return_equal, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ mov(r0, Operand(kIsNotStringMask));
__ AndP(r0, r4);
......@@ -526,6 +528,16 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ bind(&undetectable);
__ AndP(r0, r7, Operand(1 << Map::kIsUndetectable));
__ beq(&return_unequal);
// If both sides are JSReceivers, then the result is false according to
// the HTML specification, which says that only comparisons with null or
// undefined are affected by special casing for document.all.
__ CompareInstanceType(r4, r4, ODDBALL_TYPE);
__ beq(&return_equal);
__ CompareInstanceType(r5, r5, ODDBALL_TYPE);
__ bne(&return_unequal);
__ bind(&return_equal);
__ LoadImmP(r2, Operand(EQUAL));
__ Ret();
}
......@@ -676,12 +688,12 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(lhs, rhs);
__ CallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
__ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
__ LoadRoot(r4, Heap::kTrueValueRootIndex);
__ sub(r3, r3, r4);
__ LoadRoot(r3, Heap::kTrueValueRootIndex);
__ SubP(r2, r2, r3);
__ Ret();
} else {
__ Push(lhs, rhs);
......@@ -2918,41 +2930,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
__ bne(&loop);
}
void StringCompareStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : left
// -- r2 : right
// -- r14 : return address
// -----------------------------------
__ AssertString(r3);
__ AssertString(r2);
Label not_same;
__ CmpP(r2, r3);
__ bne(&not_same);
__ LoadSmiLiteral(r2, Smi::FromInt(EQUAL));
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r3,
r4);
__ Ret();
__ bind(&not_same);
// Check that both objects are sequential one-byte strings.
Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(r3, r2, r4, r5, &runtime);
// Compare flat one-byte strings natively.
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
r5);
StringHelper::GenerateCompareFlatOneByteStrings(masm, r3, r2, r4, r5, r6);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
__ Push(r3, r2);
__ TailCallRuntime(Runtime::kStringCompare);
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : left
......@@ -3255,10 +3232,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
__ Push(left, right);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(left, right);
__ CallRuntime(Runtime::kStringEqual);
}
__ LoadRoot(r3, Heap::kTrueValueRootIndex);
__ SubP(r2, r2, r3);
__ Ret();
} else {
__ Push(left, right);
__ TailCallRuntime(Runtime::kStringCompare);
}
......@@ -3782,7 +3766,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ LoadP(r3, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ AddP(r3, Operand(1));
......@@ -4774,7 +4758,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
__ LoadP(ip, MemOperand(r4, StandardFrameConstants::kMarkerOffset));
__ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
__ CmpP(ip, r3);
__ bne(&loop);
}
......@@ -4783,7 +4767,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
__ LoadP(ip, MemOperand(r4, StandardFrameConstants::kContextOffset));
__ LoadP(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ bne(&no_rest_parameters);
......@@ -4931,7 +4915,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ LoadP(r2, MemOperand(r6, StandardFrameConstants::kContextOffset));
__ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&adaptor_frame);
......@@ -5158,7 +5142,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
__ LoadP(ip, MemOperand(r4, StandardFrameConstants::kMarkerOffset));
__ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
__ CmpP(ip, r3);
__ bne(&loop);
}
......@@ -5166,7 +5150,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ LoadP(r5, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
__ LoadP(ip, MemOperand(r5, StandardFrameConstants::kContextOffset));
__ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&arguments_adaptor);
{
......@@ -5577,7 +5561,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// context save
__ push(context);
if (!is_lazy) {
if (!is_lazy()) {
// load context from callee
__ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
......
......@@ -270,6 +270,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Register array = r8;
Register length = r7;
Register scratch = r1;
Register scratch3 = r9;
Register hole_value = r9;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
......@@ -315,11 +317,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ AddP(src_elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ SmiToPtrArrayOffset(length, length);
__ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
__ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
Label initialization_loop, loop_done;
__ ShiftRightP(r0, length, Operand(kPointerSizeLog2));
__ beq(&loop_done, Label::kNear /*, cr0*/);
__ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
__ beq(&loop_done, Label::kNear);
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
......@@ -327,9 +329,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ AddP(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ bind(&initialization_loop);
__ StoreP(r9, MemOperand(dst_elements, kPointerSize));
__ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
__ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
__ BranchOnCount(r0, &initialization_loop);
__ BranchOnCount(scratch, &initialization_loop);
__ AddP(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
......@@ -342,7 +344,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
// r9: the-hole pointer
// hole_value: the-hole pointer
// heap_number_map: heap number map
__ b(&loop, Label::kNear);
......@@ -353,7 +355,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ StoreP(r9, MemOperand(dst_elements));
__ StoreP(hole_value, MemOperand(dst_elements));
__ AddP(dst_elements, Operand(kPointerSize));
__ CmpLogicalP(dst_elements, dst_end);
__ bge(&loop_done);
......@@ -370,7 +372,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
Register heap_number = receiver;
Register scratch2 = value;
__ AllocateHeapNumber(heap_number, scratch2, r1, heap_number_map,
__ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
&gc_required);
// heap_number: new heap number
#if V8_TARGET_ARCH_S390X
......@@ -607,9 +609,7 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length(), CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r3);
patcher->masm()->la(
fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
patcher->masm()->PushStandardFrame(r3);
}
#ifdef DEBUG
......
......@@ -151,7 +151,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Allocate a new deoptimizer object.
// Pass six arguments in r2 to r7.
__ PrepareCallCFunction(6, r7);
__ LoadImmP(r2, Operand::Zero());
Label context_check;
__ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r3, &context_check);
__ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ LoadImmP(r3, Operand(type())); // bailout type,
// r4: bailout id already loaded.
// r5: code address or 0 already loaded.
......@@ -230,6 +235,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(r2); // Restore deoptimizer object (class Deoptimizer).
__ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r6 = current "FrameDescription** output_",
......
......@@ -155,12 +155,11 @@ class EntryFrameConstants : public AllStatic {
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
class ExitFrameConstants : public TypedFrameConstants {
public:
static const int kFrameSize = 2 * kPointerSize;
static const int kConstantPoolOffset = 0; // Not used.
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
......@@ -177,7 +176,7 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
......
......@@ -269,12 +269,6 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareNilDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToBooleanDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
......
......@@ -558,23 +558,40 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
}
void MacroAssembler::PushFixedFrame(Register marker_reg) {
void MacroAssembler::PushCommonFrame(Register marker_reg) {
int fp_delta = 0;
CleanseP(r14);
if (marker_reg.is_valid()) {
Push(r14, fp, cp, marker_reg);
Push(r14, fp, marker_reg);
fp_delta = 1;
} else {
Push(r14, fp, cp);
Push(r14, fp);
fp_delta = 0;
}
la(fp, MemOperand(sp, fp_delta * kPointerSize));
}
void MacroAssembler::PopFixedFrame(Register marker_reg) {
void MacroAssembler::PopCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Pop(r14, fp, cp, marker_reg);
Pop(r14, fp, marker_reg);
} else {
Pop(r14, fp, cp);
Pop(r14, fp);
}
}
void MacroAssembler::PushStandardFrame(Register function_reg) {
int fp_delta = 0;
CleanseP(r14);
if (function_reg.is_valid()) {
Push(r14, fp, cp, function_reg);
fp_delta = 2;
} else {
Push(r14, fp, cp);
fp_delta = 1;
}
la(fp, MemOperand(sp, fp_delta * kPointerSize));
}
void MacroAssembler::RestoreFrameStateForTailCall() {
// if (FLAG_enable_embedded_constant_pool) {
// LoadP(kConstantPoolRegister,
......@@ -868,6 +885,59 @@ void MacroAssembler::ConvertDoubleToUnsignedInt64(
clgdbr(m, Condition(0), dst, double_input);
ldgr(double_dst, dst);
}
#endif
#if !V8_TARGET_ARCH_S390X
void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high, shift));
DCHECK(!AreAliased(dst_high, src_low, shift));
UNIMPLEMENTED();
}
void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_high, src_low));
UNIMPLEMENTED();
Label less_than_32;
Label done;
}
void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high, shift));
DCHECK(!AreAliased(dst_high, src_low, shift));
UNIMPLEMENTED();
}
void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_high, src_low));
UNIMPLEMENTED();
}
void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high, shift));
DCHECK(!AreAliased(dst_high, src_low, shift));
UNIMPLEMENTED();
}
void MacroAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_high, src_low));
UNIMPLEMENTED();
}
#endif
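The pair-shift helpers above are stubbed with UNIMPLEMENTED() in this commit. For reference, a sketch of the semantics each is expected to provide, written as plain C++ over (low, high) halves; counts are masked to 0-63, and the arithmetic variant assumes sign-propagating >> on signed values (true on mainstream compilers, guaranteed from C++20):

#include <cstdint>

// Treat (high:low) as one 64-bit value, shift it, split it back.
inline void ShiftLeftPairRef(uint32_t& dst_low, uint32_t& dst_high,
                             uint32_t src_low, uint32_t src_high, uint32_t shift) {
  uint64_t v = (static_cast<uint64_t>(src_high) << 32) | src_low;
  v <<= (shift & 63);
  dst_low = static_cast<uint32_t>(v);
  dst_high = static_cast<uint32_t>(v >> 32);
}

inline void ShiftRightPairRef(uint32_t& dst_low, uint32_t& dst_high,
                              uint32_t src_low, uint32_t src_high, uint32_t shift) {
  uint64_t v = (static_cast<uint64_t>(src_high) << 32) | src_low;
  v >>= (shift & 63);  // logical: zeros enter from the top
  dst_low = static_cast<uint32_t>(v);
  dst_high = static_cast<uint32_t>(v >> 32);
}

inline void ShiftRightArithPairRef(uint32_t& dst_low, uint32_t& dst_high,
                                   uint32_t src_low, uint32_t src_high,
                                   uint32_t shift) {
  int64_t v = static_cast<int64_t>((static_cast<uint64_t>(src_high) << 32) | src_low);
  v >>= (shift & 63);  // arithmetic: sign bits enter from the top
  dst_low = static_cast<uint32_t>(v);
  dst_high = static_cast<uint32_t>(static_cast<uint64_t>(v) >> 32);
}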
void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
......@@ -878,11 +948,10 @@ void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
ldgr(dst, src);
}
void MacroAssembler::StubPrologue(Register base, int prologue_offset) {
PushFixedFrame();
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
int prologue_offset) {
LoadSmiLiteral(r1, Smi::FromInt(type));
PushCommonFrame(r1);
}
void MacroAssembler::Prologue(bool code_pre_aging, Register base,
......@@ -911,9 +980,7 @@ void MacroAssembler::Prologue(bool code_pre_aging, Register base,
}
} else {
// This matches the code found in GetNoCodeAgeSequence()
PushFixedFrame(r3);
// Adjust fp to point to saved fp.
la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
PushStandardFrame(r3);
}
}
}
......@@ -935,13 +1002,12 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
// CodeObject <-- new sp
LoadSmiLiteral(ip, Smi::FromInt(type));
PushFixedFrame(ip);
PushCommonFrame(ip);
mov(r0, Operand(CodeObject()));
push(r0);
// Adjust FP to point to saved FP
la(fp, MemOperand(
sp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
if (type == StackFrame::INTERNAL) {
mov(r0, Operand(CodeObject()));
push(r0);
}
}
int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
......@@ -991,10 +1057,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// all of the pushes that have happened inside of V8
// since we were called from C code
CleanseP(r14);
Push(r14, fp);
LoadRR(fp, sp);
LoadSmiLiteral(r1, Smi::FromInt(StackFrame::EXIT));
PushCommonFrame(r1);
// Reserve room for saved entry sp and code object.
lay(sp, MemOperand(sp, -ExitFrameConstants::kFrameSize));
lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
if (emit_debug_code()) {
StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
......@@ -1070,7 +1136,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
const int kNumRegs = kNumCallerSavedDoubles;
lay(r5, MemOperand(fp, -(ExitFrameConstants::kFrameSize +
lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
kNumRegs * kDoubleSize)));
MultiPopDoubles(kCallerSavedDoubles, r5);
}
......@@ -1108,6 +1174,71 @@ void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
Move(dst, d0);
}
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
if (callee_args_count.is_reg()) {
DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
scratch1));
} else {
DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
}
#endif
// Calculate the end of the destination area where we will put the arguments
// after we drop the current frame. We AddP kPointerSize to count the receiver
// argument, which is not included in the formal parameters count.
Register dst_reg = scratch0;
ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
AddP(dst_reg, fp, dst_reg);
AddP(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
AddP(src_reg, sp, src_reg);
AddP(src_reg, src_reg, Operand(kPointerSize));
} else {
mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
AddP(src_reg, src_reg, sp);
}
if (FLAG_debug_code) {
CmpLogicalP(src_reg, dst_reg);
Check(lt, kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
RestoreFrameStateForTailCall();
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
// Both src_reg and dst_reg are pointing to the word after the one to copy,
// so they must be pre-decremented in the loop.
Register tmp_reg = scratch1;
Label loop;
if (callee_args_count.is_reg()) {
AddP(tmp_reg, callee_args_count.reg(), Operand(1)); // +1 for receiver
} else {
mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
}
LoadRR(r1, tmp_reg);
bind(&loop);
LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
lay(src_reg, MemOperand(src_reg, -kPointerSize));
lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
BranchOnCount(r1, &loop);
// Leave current frame.
LoadRR(sp, dst_reg);
}
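The copy loop above walks from high addresses down; a minimal C++ model of why that direction matters: the destination area (in the caller's frame) lies above, and can overlap, the source area (the callee's arguments on the stack), so copying top-down reads every source word before it can be overwritten, exactly like memmove with dst above src:

#include <cstddef>
#include <cstdint>

// Both pointers start one word past their area and are pre-decremented,
// so the highest word moves first. Safe for overlap whenever dst >= src.
inline void CopyArgsDownward(intptr_t* dst_end, intptr_t* src_end, size_t count) {
  while (count-- > 0) {
    *--dst_end = *--src_end;
  }
}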
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
bool* definitely_mismatches,
......@@ -1379,8 +1510,20 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
DCHECK(!holder_reg.is(ip));
DCHECK(!scratch.is(ip));
// Load current lexical context from the stack frame.
LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Load current lexical context from the active StandardFrame, which
// may require crawling past STUB frames.
Label load_context;
Label has_context;
DCHECK(!ip.is(scratch));
LoadRR(ip, fp);
bind(&load_context);
LoadP(scratch,
MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
JumpIfNotSmi(scratch, &has_context);
LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
b(&load_context);
bind(&has_context);
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
CmpP(scratch, Operand::Zero());
......
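The new lookup in CheckAccessGlobalProxy walks caller frames until the context-or-frame-type slot holds a heap object rather than a Smi marker. A minimal model of that loop, using a hypothetical frame struct; the Smi test itself matches V8's tagging, where Smis have a low tag bit of 0:

#include <cstdint>

struct Frame {
  Frame* caller_fp;
  intptr_t context_or_marker;  // Smi marker for typed frames, else a context
};

inline bool IsSmi(intptr_t v) { return (v & 1) == 0; }  // Smi tag is 0

// Skip typed (e.g. STUB) frames, whose slot holds a Smi frame-type
// marker, until a standard frame with a real context pointer is found.
intptr_t FindLexicalContext(const Frame* fp) {
  while (IsSmi(fp->context_or_marker)) fp = fp->caller_fp;
  return fp->context_or_marker;
}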
......@@ -208,12 +208,6 @@ class MacroAssembler : public Assembler {
void Call(Label* target);
// Emit call to the code we are currently generating.
void CallSelf() {
Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
Call(self, RelocInfo::CODE_TARGET);
}
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<Object> value);
......@@ -598,10 +592,14 @@ class MacroAssembler : public Assembler {
la(sp, MemOperand(sp, 5 * kPointerSize));
}
// Push a fixed frame, consisting of lr, fp, context and
// JS function / marker id if marker_reg is a valid register.
void PushFixedFrame(Register marker_reg = no_reg);
void PopFixedFrame(Register marker_reg = no_reg);
// Push a fixed frame, consisting of lr, fp, constant pool.
void PushCommonFrame(Register marker_reg = no_reg);
// Push a standard frame, consisting of lr, fp, constant pool,
// context and JS function
void PushStandardFrame(Register function_reg);
void PopCommonFrame(Register marker_reg = no_reg);
// Restore caller's frame pointer and return address prior to being
// overwritten by tail call stack preparation.
......@@ -696,8 +694,25 @@ class MacroAssembler : public Assembler {
FPRoundingMode rounding_mode = kRoundToZero);
#endif
#if !V8_TARGET_ARCH_S390X
void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void ShiftRightArithPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift);
void ShiftRightArithPair(Register dst_low, Register dst_high,
Register src_low, Register src_high, uint32_t shift);
#endif
// Generates function and stub prologue code.
void StubPrologue(Register base = no_reg, int prologue_offset = 0);
void StubPrologue(StackFrame::Type type, Register base = no_reg,
int prologue_offset = 0);
void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Enter exit frame.
......@@ -819,6 +834,15 @@ class MacroAssembler : public Assembler {
// call sites.
// void SetCallKind(Register dst, CallKind kind);
// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
// Both |callee_args_count| and |caller_args_count_reg| do not include
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
// is trashed.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
Register scratch1);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
......
......@@ -3819,60 +3819,11 @@ bool Simulator::DecodeSixByte(Instruction* instr) {
}
case SLLK:
case RLL:
case SRLK: {
// For SLLK/SRLK, the 32-bit third operand is shifted the number
// of bits specified by the second-operand address, and the result is
// placed at the first-operand location. Except for when the R1 and R3
// fields designate the same register, the third operand remains
// unchanged in general register R3.
int r1 = rsyInstr->R1Value();
int r3 = rsyInstr->R3Value();
int b2 = rsyInstr->B2Value();
intptr_t d2 = rsyInstr->D2Value();
// only takes rightmost 6 bits
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int shiftBits = (b2_val + d2) & 0x3F;
// unsigned
uint32_t r3_val = get_low_register<uint32_t>(r3);
uint32_t alu_out = 0;
if (SLLK == op) {
alu_out = r3_val << shiftBits;
} else if (SRLK == op) {
alu_out = r3_val >> shiftBits;
} else if (RLL == op) {
uint32_t rotateBits = r3_val >> (32 - shiftBits);
alu_out = (r3_val << shiftBits) | (rotateBits);
} else {
UNREACHABLE();
}
set_low_register(r1, alu_out);
break;
}
case SRLK:
case SLLG:
case RLLG:
case SRLG: {
// For SLLG/SRLG, the 64-bit third operand is shifted the number
// of bits specified by the second-operand address, and the result is
// placed at the first-operand location. Except for when the R1 and R3
// fields designate the same register, the third operand remains
// unchanged in general register R3.
int r1 = rsyInstr->R1Value();
int r3 = rsyInstr->R3Value();
int b2 = rsyInstr->B2Value();
intptr_t d2 = rsyInstr->D2Value();
// only takes rightmost 6 bits
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int shiftBits = (b2_val + d2) & 0x3F;
// unsigned
uint64_t r3_val = get_register(r3);
uint64_t alu_out = 0;
if (op == SLLG) {
alu_out = r3_val << shiftBits;
} else if (op == SRLG) {
alu_out = r3_val >> shiftBits;
} else {
UNREACHABLE();
}
set_register(r1, alu_out);
DecodeSixByteBitShift(instr);
break;
}
case SLAK:
......@@ -4196,6 +4147,81 @@ bool Simulator::DecodeSixByte(Instruction* instr) {
return true;
}
void Simulator::DecodeSixByteBitShift(Instruction* instr) {
Opcode op = instr->S390OpcodeValue();
// Pre-cast instruction to various types
RSYInstruction* rsyInstr = reinterpret_cast<RSYInstruction*>(instr);
switch (op) {
case SLLK:
case RLL:
case SRLK: {
// For SLLK/SRLK, the 32-bit third operand is shifted the number
// of bits specified by the second-operand address, and the result is
// placed at the first-operand location. Except for when the R1 and R3
// fields designate the same register, the third operand remains
// unchanged in general register R3.
int r1 = rsyInstr->R1Value();
int r3 = rsyInstr->R3Value();
int b2 = rsyInstr->B2Value();
intptr_t d2 = rsyInstr->D2Value();
// only takes rightmost 6 bits
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int shiftBits = (b2_val + d2) & 0x3F;
// unsigned
uint32_t r3_val = get_low_register<uint32_t>(r3);
uint32_t alu_out = 0;
if (SLLK == op) {
alu_out = r3_val << shiftBits;
} else if (SRLK == op) {
alu_out = r3_val >> shiftBits;
} else if (RLL == op) {
uint32_t rotateBits = r3_val >> (32 - shiftBits);
alu_out = (r3_val << shiftBits) | (rotateBits);
} else {
UNREACHABLE();
}
set_low_register(r1, alu_out);
break;
}
case SLLG:
case RLLG:
case SRLG: {
// For SLLG/SRLG, the 64-bit third operand is shifted the number
// of bits specified by the second-operand address, and the result is
// placed at the first-operand location. Except for when the R1 and R3
// fields designate the same register, the third operand remains
// unchanged in general register R3.
int r1 = rsyInstr->R1Value();
int r3 = rsyInstr->R3Value();
int b2 = rsyInstr->B2Value();
intptr_t d2 = rsyInstr->D2Value();
// only takes rightmost 6 bits
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int shiftBits = (b2_val + d2) & 0x3F;
// unsigned
uint64_t r3_val = get_register(r3);
uint64_t alu_out = 0;
if (op == SLLG) {
alu_out = r3_val << shiftBits;
} else if (op == SRLG) {
alu_out = r3_val >> shiftBits;
} else if (op == RLLG) {
uint64_t rotateBits = r3_val >> (64 - shiftBits);
alu_out = (r3_val << shiftBits) | (rotateBits);
} else {
UNREACHABLE();
}
set_register(r1, alu_out);
break;
}
default:
UNREACHABLE();
}
}
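One caveat worth noting on the rotate cases above: in C++, r3_val >> (32 - shiftBits) is undefined when shiftBits is 0 (a shift by the full word width). A portable formulation that matches the intended RLL/RLLG rotate semantics for all counts is to mask the complementary shift as well:

#include <cstdint>

// Rotate-left helpers with both shift counts masked, so a rotate by 0
// never produces a shift by the full word width.
inline uint32_t RotateLeft32(uint32_t v, unsigned n) {
  n &= 31;
  return (v << n) | (v >> ((32 - n) & 31));
}

inline uint64_t RotateLeft64(uint64_t v, unsigned n) {
  n &= 63;
  return (v << n) | (v >> ((64 - n) & 63));
}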
/**
* Decodes and simulates six byte arithmetic instructions
*/
......
......@@ -316,6 +316,7 @@ class Simulator {
bool DecodeSixByte(Instruction* instr);
bool DecodeSixByteArithmetic(Instruction* instr);
bool S390InstructionDecode(Instruction* instr);
void DecodeSixByteBitShift(Instruction* instr);
// Used by the CL**BR instructions.
template <typename T1, typename T2>
......