// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_S390

#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
  __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
  __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
          RelocInfo::CODE_TARGET);
}

static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                           Runtime::FunctionId function_id) {
  // ----------- S t a t e -------------
  //  -- r2 : actual argument count
  //  -- r3 : target function (preserved for callee)
  //  -- r5 : new target (preserved for callee)
  // -----------------------------------
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    // Push a copy of the target function, the new target and the actual
    // argument count.
    // Push function as parameter to the runtime call.
    __ SmiTag(kJavaScriptCallArgCountRegister);
    __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
            kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);

    __ CallRuntime(function_id, 1);
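    // The runtime call returns the code object to tail-call in r2; move it
    // into r4 (kJavaScriptCallCodeStartRegister) for JumpCodeObject below.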
    __ mov(r4, r2);

    // Restore target function, new target and actual argument count.
    __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
           kJavaScriptCallArgCountRegister);
    __ SmiUntag(kJavaScriptCallArgCountRegister);
  }
  static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
  __ JumpCodeObject(r4);
}

namespace {

void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2     : number of arguments
  //  -- r3     : constructor function
  //  -- r5     : new target
  //  -- cp     : context
  //  -- lr     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  Register scratch = r4;
  Label stack_overflow;

  __ StackOverflowCheck(r2, scratch, &stack_overflow);

  // Enter a construct frame.
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);

    // Preserve the incoming parameters on the stack.
    __ SmiTag(r2);
    __ Push(cp, r2);
    __ SmiUntag(r2);
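    // The frame now holds the smi-tagged argument count, while r2 keeps the
    // untagged count for the argument copy below.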

    // TODO(victorgomes): When the arguments adaptor is completely removed, we
    // should get the formal parameter count and copy the arguments in its
    // correct position (including any undefined), instead of delaying this to
    // InvokeFunction.

    // Set up pointer to last argument (skip receiver).
    __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
                                 kSystemPointerSize));
    // Copy arguments and receiver to the expression stack.
    __ PushArray(r6, r2, r1, r0);
    // The receiver for the builtin/api call.
    __ PushRoot(RootIndex::kTheHoleValue);

    // Call the function.
    // r2: number of arguments
    // r3: constructor function
    // r5: new target

    __ InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall);

    // Restore context from the frame.
    __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
    // Restore smi-tagged arguments count from the frame.
    __ LoadU64(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));

    // Leave construct frame.
  }
  // Remove caller arguments from the stack and return.
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);

  __ SmiToPtrArrayOffset(scratch, scratch);
  __ AddS64(sp, sp, scratch);
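  // Additionally drop the receiver slot.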
  __ AddS64(sp, sp, Operand(kSystemPointerSize));
  __ Ret();

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ bkpt(0);  // Unreachable code.
  }
}

}  // namespace

// The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  --      r2: number of arguments (untagged)
  //  --      r3: constructor function
  //  --      r5: new target
  //  --      cp: context
  //  --      lr: return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  FrameScope scope(masm, StackFrame::MANUAL);
  // Enter a construct frame.
  Label post_instantiation_deopt_entry, not_create_implicit_receiver;
  __ EnterFrame(StackFrame::CONSTRUCT);

  // Preserve the incoming parameters on the stack.
  __ SmiTag(r2);
  __ Push(cp, r2, r3);
  __ PushRoot(RootIndex::kUndefinedValue);
  __ Push(r5);

  // ----------- S t a t e -------------
  //  --        sp[0*kSystemPointerSize]: new target
  //  --        sp[1*kSystemPointerSize]: padding
  //  -- r3 and sp[2*kSystemPointerSize]: constructor function
  //  --        sp[3*kSystemPointerSize]: number of arguments (tagged)
  //  --        sp[4*kSystemPointerSize]: context
  // -----------------------------------

  __ LoadTaggedPointerField(
      r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
  __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
  __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
  __ JumpIfIsInRange(r6, kDefaultDerivedConstructor, kDerivedConstructor,
                     &not_create_implicit_receiver);

  // If not derived class constructor: Allocate the new receiver object.
  __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r6,
                      r7);
  __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
  __ b(&post_instantiation_deopt_entry);

  // Else: use TheHoleValue as receiver for constructor call
  __ bind(&not_create_implicit_receiver);
  __ LoadRoot(r2, RootIndex::kTheHoleValue);

  // ----------- S t a t e -------------
  //  --                          r2: receiver
  //  -- Slot 4 / sp[0*kSystemPointerSize]: new target
  //  -- Slot 3 / sp[1*kSystemPointerSize]: padding
  //  -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
  //  -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
  //  -- Slot 0 / sp[4*kSystemPointerSize]: context
  // -----------------------------------
  // Deoptimizer enters here.
  masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
      masm->pc_offset());
  __ bind(&post_instantiation_deopt_entry);

  // Restore new target.
  __ Pop(r5);

  // Push the allocated receiver to the stack.
  __ Push(r2);
  // We need two copies because we may have to return the original one
  // and the calling conventions dictate that the called function pops the
  // receiver. The second copy is pushed after the arguments; we keep it in
  // r8 since r2 needs to hold the number of arguments before invoking the
  // function.
  __ mov(r8, r2);

  // Set up pointer to first argument (skip receiver).
  __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
                               kSystemPointerSize));

  // ----------- S t a t e -------------
  //  --                 r5: new target
  //  -- sp[0*kSystemPointerSize]: implicit receiver
  //  -- sp[1*kSystemPointerSize]: implicit receiver
  //  -- sp[2*kSystemPointerSize]: padding
  //  -- sp[3*kSystemPointerSize]: constructor function
  //  -- sp[4*kSystemPointerSize]: number of arguments (tagged)
  //  -- sp[5*kSystemPointerSize]: context
  // -----------------------------------

  // Restore constructor function and argument count.
  __ LoadU64(r3, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
  __ LoadU64(r2, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
  __ SmiUntag(r2);

  Label stack_overflow;
  __ StackOverflowCheck(r2, r7, &stack_overflow);

  // Copy arguments and receiver to the expression stack.
  __ PushArray(r6, r2, r1, r0);

  // Push implicit receiver.
  __ Push(r8);

  // Call the function.
  __ InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall);

  // ----------- S t a t e -------------
  //  --                 r0: constructor result
  //  -- sp[0*kSystemPointerSize]: implicit receiver
  //  -- sp[1*kSystemPointerSize]: padding
  //  -- sp[2*kSystemPointerSize]: constructor function
  //  -- sp[3*kSystemPointerSize]: number of arguments
  //  -- sp[4*kSystemPointerSize]: context
  // -----------------------------------

  // Store offset of return address for deoptimizer.
  masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
      masm->pc_offset());

  // If the result is an object (in the ECMA sense), we should get rid
  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
  // on page 74.
  Label use_receiver, do_throw, leave_and_return, check_receiver;

  // If the result is undefined, we jump out to using the implicit receiver.
  __ JumpIfNotRoot(r2, RootIndex::kUndefinedValue, &check_receiver);

  // Otherwise we do a smi check and fall through to check if the return value
  // is a valid receiver.

  // Throw away the result of the constructor invocation and use the
  // on-stack receiver as the result.
  __ bind(&use_receiver);
  __ LoadU64(r2, MemOperand(sp));
  __ JumpIfRoot(r2, RootIndex::kTheHoleValue, &do_throw);

  __ bind(&leave_and_return);
  // Restore smi-tagged arguments count from the frame.
  __ LoadU64(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
  // Leave construct frame.
  __ LeaveFrame(StackFrame::CONSTRUCT);

  // Remove caller arguments from the stack and return.
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);

  __ SmiToPtrArrayOffset(r3, r3);
  __ AddS64(sp, sp, r3);
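  // Additionally drop the receiver slot.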
  __ AddS64(sp, sp, Operand(kSystemPointerSize));
  __ Ret();

  __ bind(&check_receiver);
  // If the result is a smi, it is *not* an object in the ECMA sense.
  __ JumpIfSmi(r2, &use_receiver);

  // If the type of the result (stored in its map) is less than
  // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
  __ CompareObjectType(r2, r6, r6, FIRST_JS_RECEIVER_TYPE);
  __ bge(&leave_and_return);
  __ b(&use_receiver);

  __ bind(&do_throw);
  // Restore the context from the frame.
  __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
  __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
  __ bkpt(0);

  __ bind(&stack_overflow);
  // Restore the context from the frame.
  __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
  __ CallRuntime(Runtime::kThrowStackOverflow);
  // Unreachable code.
  __ bkpt(0);
}

void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
  Generate_JSBuiltinsConstructStubHelper(masm);
}

static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
                                          Register sfi_data,
                                          Register scratch1) {
  Label done;

  __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
  __ bne(&done, Label::kNear);
  __ LoadTaggedPointerField(
      sfi_data,
      FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
  __ bind(&done);
}

// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2 : the value to pass to the generator
  //  -- r3 : the JSGeneratorObject to resume
  //  -- lr : return address
  // -----------------------------------
  // Store input value into generator object.
  __ StoreTaggedField(
      r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset), r0);
  __ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
                      kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
  // Check that r3 is still valid, RecordWrite might have clobbered it.
  __ AssertGeneratorObject(r3);

  // Load suspended function and context.
  __ LoadTaggedPointerField(
      r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
  __ LoadTaggedPointerField(cp,
                            FieldMemOperand(r6, JSFunction::kContextOffset));

  // Flood function if we are stepping.
  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
  Label stepping_prepared;
  Register scratch = r7;

  ExternalReference debug_hook =
      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
  __ Move(scratch, debug_hook);
  __ LoadS8(scratch, MemOperand(scratch));
  __ CmpSmiLiteral(scratch, Smi::zero(), r0);
  __ bne(&prepare_step_in_if_stepping);

  // Flood function if we need to continue stepping in the suspended generator.

  ExternalReference debug_suspended_generator =
      ExternalReference::debug_suspended_generator_address(masm->isolate());

  __ Move(scratch, debug_suspended_generator);
  __ LoadU64(scratch, MemOperand(scratch));
  __ CmpS64(scratch, r3);
  __ beq(&prepare_step_in_suspended_generator);
  __ bind(&stepping_prepared);

  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
  Label stack_overflow;
  __ LoadU64(scratch,
             __ StackLimitAsMemOperand(StackLimitKind::kRealStackLimit));
  __ CmpU64(sp, scratch);
  __ blt(&stack_overflow);

  // ----------- S t a t e -------------
  //  -- r3    : the JSGeneratorObject to resume
  //  -- r6    : generator function
  //  -- cp    : generator context
  //  -- lr    : return address
  // -----------------------------------

  // Copy the function arguments from the generator object's register file.
  __ LoadTaggedPointerField(
      r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
  __ LoadU16(
      r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
  __ LoadTaggedPointerField(
      r4,
      FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
  {
    Label done_loop, loop;
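    // Push the parameters from the register file, starting with the last one.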
    __ bind(&loop);
    __ SubS64(r5, r5, Operand(1));
    __ blt(&done_loop);
    __ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2));
    __ la(scratch, MemOperand(r4, r1));
    __ LoadAnyTaggedField(scratch,
                          FieldMemOperand(scratch, FixedArray::kHeaderSize));
    __ Push(scratch);
    __ b(&loop);
    __ bind(&done_loop);

    // Push receiver.
    __ LoadAnyTaggedField(
        scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
    __ Push(scratch);
  }

  // Underlying function needs to have bytecode available.
  if (FLAG_debug_code) {
    __ LoadTaggedPointerField(
        r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
    __ LoadTaggedPointerField(
        r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
    GetSharedFunctionInfoBytecode(masm, r5, ip);
    __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
    __ Assert(eq, AbortReason::kMissingBytecodeArray);
  }

  // Resume (Ignition/TurboFan) generator object.
  {
    __ LoadTaggedPointerField(
        r2, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
    __ LoadS16(
        r2,
        FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
    // We abuse new.target both to indicate that this is a resume call and to
    // pass in the generator object.  In ordinary calls, new.target is always
    // undefined because generator functions are non-constructable.
    __ mov(r5, r3);
    __ mov(r3, r6);
    static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
    __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
    __ JumpCodeObject(r4);
  }

  __ bind(&prepare_step_in_if_stepping);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(r3, r6);
    // Push hole as receiver since we do not use it for stepping.
    __ PushRoot(RootIndex::kTheHoleValue);
    __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(r3);
    __ LoadTaggedPointerField(
        r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
  }
  __ b(&stepping_prepared);

  __ bind(&prepare_step_in_suspended_generator);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(r3);
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
    __ Pop(r3);
    __ LoadTaggedPointerField(
        r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
  }
  __ b(&stepping_prepared);

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ bkpt(0);  // This should be unreachable.
  }
}

void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
  __ push(r3);
  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}

namespace {

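// The total stack space Generate_JSEntryVariant pushes before it calls the
// entry trampoline: the callee-saved GPRs (plus r14 and sp), the callee-saved
// doubles, the five entry-frame marker slots, and the space reserved when the
// frame pointer is set up. It is checked against the pushed_stack_space
// counter accumulated below (see the DCHECK_EQ).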
constexpr int kPushedStackSpace =
    (kNumCalleeSaved + 2) * kSystemPointerSize +
    kNumCalleeSavedDoubles * kDoubleSize + 5 * kSystemPointerSize +
    EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;

// Called with the native C calling convention. The corresponding function
// signature is either:
//
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, Address new_target, Address target,
//       Address receiver, intptr_t argc, Address** args)>;
// or
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
                             Builtin entry_trampoline) {
  // The register state is either:
  //   r2:                             root register value
  //   r3:                             code entry
  //   r4:                             function
  //   r5:                             receiver
  //   r6:                             argc
  //   [sp + 20 * kSystemPointerSize]: argv
  // or
  //   r2: root_register_value
  //   r3: microtask_queue

  Label invoke, handler_entry, exit;

  int pushed_stack_space = 0;
  {
    NoRootArrayScope no_root_array(masm);

    // saving floating point registers
    // 64bit ABI requires f8 to f15 be saved
    // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_zSeries.html
    __ lay(sp, MemOperand(sp, -8 * kDoubleSize));
    __ std(d8, MemOperand(sp));
    __ std(d9, MemOperand(sp, 1 * kDoubleSize));
    __ std(d10, MemOperand(sp, 2 * kDoubleSize));
    __ std(d11, MemOperand(sp, 3 * kDoubleSize));
    __ std(d12, MemOperand(sp, 4 * kDoubleSize));
    __ std(d13, MemOperand(sp, 5 * kDoubleSize));
    __ std(d14, MemOperand(sp, 6 * kDoubleSize));
    __ std(d15, MemOperand(sp, 7 * kDoubleSize));
    pushed_stack_space += kNumCalleeSavedDoubles * kDoubleSize;

    // zLinux ABI
    //    Incoming parameters:
    //          r2: root register value
    //          r3: code entry
    //          r4: function
    //          r5: receiver
    //          r6: argc
    // [sp + 20 * kSystemPointerSize]: argv
    //    Requires us to save the callee-preserved registers r6-r13
    //    General convention is to also save r14 (return addr) and
    //    sp/r15 as well in a single STM/STMG
    __ lay(sp, MemOperand(sp, -10 * kSystemPointerSize));
    __ StoreMultipleP(r6, sp, MemOperand(sp, 0));
    pushed_stack_space += (kNumCalleeSaved + 2) * kSystemPointerSize;

    // Initialize the root register.
    // C calling convention. The first argument is passed in r2.
    __ mov(kRootRegister, r2);
  }

  // Save r6 (argc) in r0; it is restored after the entry frame is set up.
  __ mov(r0, r6);

  // Push a frame with special values setup to mark it as an entry frame.
  //   Bad FP (-1)
  //   SMI Marker
  //   SMI Marker
  //   kCEntryFPAddress
  //   Frame type
  __ lay(sp, MemOperand(sp, -5 * kSystemPointerSize));
  pushed_stack_space += 5 * kSystemPointerSize;

  // Push a bad frame pointer to fail if it is used.
  __ mov(r9, Operand(-1));

  __ mov(r8, Operand(StackFrame::TypeToMarker(type)));
  __ mov(r7, Operand(StackFrame::TypeToMarker(type)));
  // Save copies of the top frame descriptor on the stack.
  __ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                        masm->isolate()));
  __ LoadU64(r6, MemOperand(r1));
  __ StoreMultipleP(r6, r9, MemOperand(sp, kSystemPointerSize));
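  // This stores r6 (the saved c_entry_fp), r7/r8 (the frame markers) and r9
  // (the bad frame pointer) into the four slots above the frame-type slot.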

  // Clear c_entry_fp, now we've pushed its previous value to the stack.
  // If the c_entry_fp is not already zero and we don't clear it, the
  // SafeStackFrameIterator will assume we are executing C++ and miss the JS
  // frames on top.
  __ mov(r6, Operand::Zero());
  __ StoreU64(r6, MemOperand(r1));

  Register scrach = r8;

  // Set up frame pointer for the frame to be pushed.
  // Need to add kSystemPointerSize, because sp has one extra
  // frame already for the frame type being pushed later.
  __ lay(fp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset +
                                kSystemPointerSize));
  pushed_stack_space +=
      EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;

  // restore r6
  __ mov(r6, r0);

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp =
      ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
                                masm->isolate());
  __ Move(r7, js_entry_sp);
  __ LoadAndTestP(scrach, MemOperand(r7));
  __ bne(&non_outermost_js, Label::kNear);
  __ StoreU64(fp, MemOperand(r7));
  __ mov(scrach, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ b(&cont, Label::kNear);
  __ bind(&non_outermost_js);
  __ mov(scrach, Operand(StackFrame::INNER_JSENTRY_FRAME));

  __ bind(&cont);
  __ StoreU64(scrach, MemOperand(sp));  // frame-type

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ b(&invoke, Label::kNear);

  __ bind(&handler_entry);

  // Store the current pc as the handler offset. It's used later to create the
  // handler table.
  masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());

  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.  Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ Move(scrach,
          ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
                                    masm->isolate()));

  __ StoreU64(r2, MemOperand(scrach));
  __ LoadRoot(r2, RootIndex::kException);
  __ b(&exit, Label::kNear);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r2-r6.
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the b(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Invoke the function by calling through JS entry trampoline builtin and
  // pop the faked function when we return.
  Handle<Code> trampoline_code =
      masm->isolate()->builtins()->code_handle(entry_trampoline);
  DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
  __ Call(trampoline_code, RelocInfo::CODE_TARGET);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();
  __ bind(&exit);  // r2 holds result

  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r7);
  __ CmpS64(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ bne(&non_outermost_js_2, Label::kNear);
  __ mov(scrach, Operand::Zero());
  __ Move(r7, js_entry_sp);
  __ StoreU64(scrach, MemOperand(r7));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r5);
  __ Move(scrach, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                            masm->isolate()));
  __ StoreU64(r5, MemOperand(scrach));

  // Reset the stack to the callee saved registers.
  __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));

  // Reload callee-saved preserved regs, return address reg (r14) and sp
  __ LoadMultipleP(r6, sp, MemOperand(sp, 0));
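  // Skip the 10 slots that held r6-r13, r14 and sp.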
  __ la(sp, MemOperand(sp, 10 * kSystemPointerSize));

// Restore the saved floating point registers.
#if V8_TARGET_ARCH_S390X
  // 64bit ABI requires f8 to f15 be restored
  __ ld(d8, MemOperand(sp));
  __ ld(d9, MemOperand(sp, 1 * kDoubleSize));
  __ ld(d10, MemOperand(sp, 2 * kDoubleSize));
  __ ld(d11, MemOperand(sp, 3 * kDoubleSize));
  __ ld(d12, MemOperand(sp, 4 * kDoubleSize));
  __ ld(d13, MemOperand(sp, 5 * kDoubleSize));
  __ ld(d14, MemOperand(sp, 6 * kDoubleSize));
  __ ld(d15, MemOperand(sp, 7 * kDoubleSize));
  __ la(sp, MemOperand(sp, 8 * kDoubleSize));
#else
  // 31bit ABI requires you to store f4 and f6:
  // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
  __ ld(d4, MemOperand(sp));
  __ ld(d6, MemOperand(sp, kDoubleSize));
  __ la(sp, MemOperand(sp, 2 * kDoubleSize));
#endif

  __ b(r14);
}

}  // namespace

void Builtins::Generate_JSEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
}

void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
                          Builtin::kJSConstructEntryTrampoline);
}

void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::ENTRY,
                          Builtin::kRunMicrotasksTrampoline);
}

static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
  // Called from Generate_JS_Entry
  // r3: new.target
  // r4: function
  // r5: receiver
  // r6: argc
  // [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv
  // r0,r2,r7-r9, cp may be clobbered

  __ mov(r2, r6);
  // Load argv from the stack.
  __ LoadU64(
      r6, MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgvOffset));

  // r2: argc
  // r3: new.target
  // r4: function
  // r5: receiver
  // r6: argv

  // Enter an internal frame.
  {
    // FrameScope ends up calling MacroAssembler::EnterFrame here
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Setup the context (we need to use the caller context from the isolate).
    ExternalReference context_address = ExternalReference::Create(
        IsolateAddressId::kContextAddress, masm->isolate());
    __ Move(cp, context_address);
    __ LoadU64(cp, MemOperand(cp));

    // Push the function
    __ Push(r4);

    // Check if we have enough stack space to push all arguments.
    Label enough_stack_space, stack_overflow;
    __ AddS64(r7, r2, Operand(1));
    __ StackOverflowCheck(r7, r1, &stack_overflow);
    __ b(&enough_stack_space);
    __ bind(&stack_overflow);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable code.
    __ bkpt(0);

    __ bind(&enough_stack_space);

    // Copy arguments to the stack in a loop from argv to sp.
    // The arguments are actually placed in reverse order on sp
    // compared to argv (i.e. arg1 is highest memory in sp).
    // r2: argc
    // r3: function
    // r5: new.target
    // r6: argv, i.e. points to first arg
    // r7: scratch reg to hold scaled argc
    // r8: scratch reg to hold arg handle
    // r9: scratch reg to hold index into argv
    Label argLoop, argExit;

    __ ShiftLeftU64(r9, r2, Operand(kSystemPointerSizeLog2));
    __ lay(r9, MemOperand(r6, r9, -kSystemPointerSize));  // point to last arg

    __ ltgr(r7, r2);

    __ beq(&argExit, Label::kNear);
    __ bind(&argLoop);

    __ LoadU64(r8, MemOperand(r9));  // read next parameter
    __ LoadU64(r0, MemOperand(r8));  // dereference handle
    __ Push(r0);
    __ lay(r9, MemOperand(r9, -kSystemPointerSize));  // advance to next arg
    __ SubS64(r7, r7, Operand(1));
    __ bgt(&argLoop);

    __ bind(&argExit);

    // Push the receiver.
    __ Push(r5);

    // Setup new.target, argc and function.
    __ mov(r5, r3);
    __ mov(r3, r4);
    // r2: argc
    // r3: function
    // r5: new.target

    // Initialize all JavaScript callee-saved registers, since they will be seen
    // by the garbage collector as part of handlers.
    __ LoadRoot(r4, RootIndex::kUndefinedValue);
    __ mov(r6, r4);
    __ mov(r7, r6);
    __ mov(r8, r6);
    __ mov(r9, r6);

    // Invoke the code.
    Handle<Code> builtin = is_construct
                               ? BUILTIN_CODE(masm->isolate(), Construct)
                               : masm->isolate()->builtins()->Call();
    __ Call(builtin, RelocInfo::CODE_TARGET);

    // Exit the JS frame and remove the parameters (except function), and
    // return.
  }
  __ b(r14);

  // r2: result
}

void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, false);
}

void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}

void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
  // This expects two C++ function parameters passed by Invoke() in
  // execution.cc.
  //   r2: root_register_value
  //   r3: microtask_queue

  __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r3);
  __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}

static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
                                                Register optimized_code,
                                                Register closure,
                                                Register scratch1,
                                                Register slot_address) {
  DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
  DCHECK_EQ(closure, kJSFunctionRegister);
  DCHECK(!AreAliased(optimized_code, closure));
  // Store code entry in the closure.
  __ StoreTaggedField(optimized_code,
                      FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
  // Write barrier clobbers scratch1 below.
  Register value = scratch1;
  __ mov(value, optimized_code);

  __ RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
                      kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
                      RememberedSetAction::kOmit, SmiCheck::kOmit);
}

static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
                                  Register scratch2) {
  Register params_size = scratch1;
  // Get the size of the formal parameters + receiver (in bytes).
  __ LoadU64(params_size,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ LoadU32(params_size,
            FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));

  Register actual_params_size = scratch2;
  // Compute the size of the actual parameters + receiver (in bytes).
  __ LoadU64(actual_params_size,
             MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ ShiftLeftU64(actual_params_size, actual_params_size,
                  Operand(kSystemPointerSizeLog2));
  __ AddS64(actual_params_size, actual_params_size,
            Operand(kSystemPointerSize));

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  __ CmpS64(params_size, actual_params_size);
  __ bge(&corrected_args_count);
  __ mov(params_size, actual_params_size);
  __ bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ LeaveFrame(StackFrame::INTERPRETED);

  __ AddS64(sp, sp, params_size);
}

// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
                                          Register actual_marker,
                                          OptimizationMarker expected_marker,
                                          Runtime::FunctionId function_id) {
  Label no_match;
  __ CmpS64(actual_marker, Operand(expected_marker));
  __ bne(&no_match);
  GenerateTailCallToReturnedCode(masm, function_id);
  __ bind(&no_match);
}

static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                                      Register optimized_code_entry,
                                      Register scratch) {
  // ----------- S t a t e -------------
  //  -- r2 : actual argument count
  //  -- r5 : new target (preserved for callee if needed, and caller)
  //  -- r3 : target function (preserved for callee if needed, and caller)
  // -----------------------------------
  DCHECK(!AreAliased(r3, r5, optimized_code_entry, scratch));

  Register closure = r3;
  Label heal_optimized_code_slot;

  // If the optimized code is cleared, go to runtime to update the optimization
  // marker field.
  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
                   &heal_optimized_code_slot);

  // Check if the optimized code is marked for deopt. If it is, call the
  // runtime to clear it.
  __ LoadTaggedPointerField(
      scratch,
      FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
  __ LoadS32(scratch, FieldMemOperand(
                        scratch, CodeDataContainer::kKindSpecificFlagsOffset));
  __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
  __ bne(&heal_optimized_code_slot);

  // Optimized code is good, get it into the closure and link the closure
  // into the optimized functions list, then tail call the optimized code.
  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                      scratch, r7);
  static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
  __ LoadCodeObjectEntry(r4, optimized_code_entry);
  __ Jump(r4);

  // Optimized code slot contains deoptimized code or code is cleared and
  // optimized code marker isn't updated. Evict the code, update the marker
  // and re-enter the closure's code.
  __ bind(&heal_optimized_code_slot);
  GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}

static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                              Register optimization_marker) {
  // ----------- S t a t e -------------
  //  -- r2 : actual argument count
  //  -- r5 : new target (preserved for callee if needed, and caller)
  //  -- r3 : target function (preserved for callee if needed, and caller)
  //  -- feedback vector (preserved for caller if needed)
  //  -- optimization_marker : an int32 containing a non-zero optimization
  //  marker.
  // -----------------------------------
  DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker));

  // TODO(v8:8394): The logging of first execution will break if
  // feedback vectors are not allocated. We need to find a different way of
  // logging these events if required.
  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                OptimizationMarker::kLogFirstExecution,
                                Runtime::kFunctionFirstExecution);
  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                OptimizationMarker::kCompileOptimized,
                                Runtime::kCompileOptimized_NotConcurrent);
  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                OptimizationMarker::kCompileOptimizedConcurrent,
                                Runtime::kCompileOptimized_Concurrent);

  // Marker should be one of LogFirstExecution / CompileOptimized /
  // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
  // here.
  if (FLAG_debug_code) {
    __ stop();
  }
}

// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode. Will not advance
// the bytecode offset if the current bytecode is a JumpLoop, instead just
// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
                                          Register bytecode_array,
                                          Register bytecode_offset,
                                          Register bytecode, Register scratch1,
                                          Register scratch2, Label* if_return) {
  Register bytecode_size_table = scratch1;
  Register scratch3 = bytecode;

  // The bytecode offset value will be increased by one in wide and extra wide
  // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
  // will restore the original bytecode. In order to simplify the code, we have
  // a backup of it.
  Register original_bytecode_offset = scratch2;
  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                     bytecode, original_bytecode_offset));
  __ Move(bytecode_size_table,
          ExternalReference::bytecode_size_table_address());
  __ Move(original_bytecode_offset, bytecode_offset);

  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
  Label process_bytecode, extra_wide;
  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
  STATIC_ASSERT(3 ==
                static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
  __ CmpS64(bytecode, Operand(0x3));
  __ bgt(&process_bytecode);
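  // The ExtraWide prefixes (1 and 3) have the low bit set; the Wide prefixes
  // (0 and 2) fall through to the wide case below.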
  __ tmll(bytecode, Operand(0x1));
  __ bne(&extra_wide);

  // Load the next bytecode and update table to the wide scaled table.
  __ AddS64(bytecode_offset, bytecode_offset, Operand(1));
  __ LoadU8(bytecode, MemOperand(bytecode_array, bytecode_offset));
  __ AddS64(bytecode_size_table, bytecode_size_table,
            Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
  __ b(&process_bytecode);

  __ bind(&extra_wide);
  // Load the next bytecode and update table to the extra wide scaled table.
  __ AddS64(bytecode_offset, bytecode_offset, Operand(1));
  __ LoadU8(bytecode, MemOperand(bytecode_array, bytecode_offset));
  __ AddS64(bytecode_size_table, bytecode_size_table,
            Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));

  // Load the size of the current bytecode.
  __ bind(&process_bytecode);

  // Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME)                                             \
  __ CmpS64(bytecode,                                                   \
            Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
  __ beq(if_return);
  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL

  // If this is a JumpLoop, re-execute it to perform the jump to the beginning
  // of the loop.
  Label end, not_jump_loop;
  __ CmpS64(bytecode,
            Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
  __ bne(&not_jump_loop);
  // We need to restore the original bytecode_offset since we might have
  // increased it to skip the wide / extra-wide prefix bytecode.
  __ Move(bytecode_offset, original_bytecode_offset);
  __ b(&end);

  __ bind(&not_jump_loop);
  // Otherwise, load the size of the current bytecode and advance the offset.
  __ LoadU8(scratch3, MemOperand(bytecode_size_table, bytecode));
  __ AddS64(bytecode_offset, bytecode_offset, scratch3);

  __ bind(&end);
}

static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    Register feedback_vector) {
  DCHECK(!AreAliased(optimization_state, feedback_vector));
  Label maybe_has_optimized_code;
  // Check if optimized code is available
  __ TestBitMask(optimization_state,
                 FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
                 r0);
  __ beq(&maybe_has_optimized_code);

  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);

  __ bind(&maybe_has_optimized_code);
  Register optimized_code_entry = optimization_state;
  __ LoadAnyTaggedField(
      optimized_code_entry,
      FieldMemOperand(feedback_vector,
                      FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
}

// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
//
// The live registers are:
//   o r2: actual argument count (not including the receiver)
//   o r3: the JS function object being called.
//   o r5: the incoming new target or generator object
//   o cp: our context
//   o pp: the caller's constant pool pointer (if enabled)
//   o fp: the caller's frame pointer
//   o sp: stack pointer
//   o lr: return address
//
// The function builds an interpreter frame.  See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  Register closure = r3;
  Register feedback_vector = r4;

  // Get the bytecode array from the function object and load it into
  // kInterpreterBytecodeArrayRegister.
  __ LoadTaggedPointerField(
      r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  // Load original bytecode array or the debug copy.
  __ LoadTaggedPointerField(
      kInterpreterBytecodeArrayRegister,
      FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
  GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, ip);

  // The bytecode array could have been flushed from the shared function info,
  // if so, call into CompileLazy.
  Label compile_lazy;
  __ CompareObjectType(kInterpreterBytecodeArrayRegister, r6, no_reg,
                       BYTECODE_ARRAY_TYPE);
  __ bne(&compile_lazy);

  // Load the feedback vector from the closure.
  __ LoadTaggedPointerField(
      feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, setup the stack frame.
  __ LoadTaggedPointerField(
      r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ LoadU16(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
  __ CmpS64(r6, Operand(FEEDBACK_VECTOR_TYPE));
  __ bne(&push_stack_frame);

  Register optimization_state = r6;

  // Read off the optimization state in the feedback vector.
  __ LoadS32(optimization_state,
           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));

  // Check if the optimized code slot is not empty or has an optimization
  // marker.
  Label has_optimized_code_or_marker;
  __ TestBitMask(optimization_state,
                 FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
                 r0);
  __ bne(&has_optimized_code_or_marker);

  Label not_optimized;
  __ bind(&not_optimized);

  // Increment invocation count for the function.
  __ LoadS32(r1, FieldMemOperand(feedback_vector,
                               FeedbackVector::kInvocationCountOffset));
  __ AddS64(r1, r1, Operand(1));
  __ StoreU32(r1, FieldMemOperand(feedback_vector,
                                  FeedbackVector::kInvocationCountOffset));

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done below).
  __ bind(&push_stack_frame);
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(closure);

  // Reset the code age and OSR arming. The OSR field and BytecodeAgeOffset
  // are 8-bit fields next to each other, so we could just optimize by writing
  // a 16-bit value. These static asserts guard that this assumption is valid.
  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
  __ mov(r1, Operand(0));
  __ StoreU16(r1,
              FieldMemOperand(kInterpreterBytecodeArrayRegister,
                              BytecodeArray::kOsrNestingLevelOffset),
              r0);

  // Load the initial bytecode offset.
  __ mov(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  // Push bytecode array and Smi tagged bytecode array offset.
  __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
  __ Push(kInterpreterBytecodeArrayRegister, r4);

  // Allocate the local and temporary register file on the stack.
  Label stack_overflow;
  {
    // Load frame size (word) from the BytecodeArray object.
    __ LoadU32(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                                  BytecodeArray::kFrameSizeOffset));

    // Do a stack check to ensure we don't go over the limit.
    __ SubS64(r8, sp, r4);
    __ CmpU64(r8, __ StackLimitAsMemOperand(StackLimitKind::kRealStackLimit));
    __ blt(&stack_overflow);

    // If ok, push undefined as the initial value for all register file entries.
    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
    Label loop, no_args;
    __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
    __ ShiftRightU64(r4, r4, Operand(kSystemPointerSizeLog2));
    __ LoadAndTestP(r4, r4);
    __ beq(&no_args);
    __ mov(r1, r4);
    __ bind(&loop);
    __ push(kInterpreterAccumulatorRegister);
    __ SubS64(r1, Operand(1));
    __ bne(&loop);
    __ bind(&no_args);
  }

  // If the bytecode array has a valid incoming new target or generator object
  // register, initialize it with the incoming value that was passed in r5.
  Label no_incoming_new_target_or_generator_register;
  __ LoadS32(r8, FieldMemOperand(
                   kInterpreterBytecodeArrayRegister,
                   BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
  __ CmpS64(r8, Operand::Zero());
  __ beq(&no_incoming_new_target_or_generator_register);
  __ ShiftLeftU64(r8, r8, Operand(kSystemPointerSizeLog2));
  __ StoreU64(r5, MemOperand(fp, r8));
  __ bind(&no_incoming_new_target_or_generator_register);

  // Perform interrupt stack check.
  // TODO(solanes): Merge with the real stack limit check above.
  Label stack_check_interrupt, after_stack_check_interrupt;
  __ LoadU64(r0,
             __ StackLimitAsMemOperand(StackLimitKind::kInterruptStackLimit));
  __ CmpU64(sp, r0);
  __ blt(&stack_check_interrupt);
  __ bind(&after_stack_check_interrupt);

  // The accumulator is already loaded with undefined.

  // Load the dispatch table into a register and dispatch to the bytecode
  // handler at the current bytecode offset.
  Label do_dispatch;
  __ bind(&do_dispatch);
  __ Move(
      kInterpreterDispatchTableRegister,
      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));

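  // Fetch the current bytecode and use it to index into the dispatch table.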
  __ LoadU8(r5, MemOperand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister));
  __ ShiftLeftU64(r5, r5, Operand(kSystemPointerSizeLog2));
  __ LoadU64(kJavaScriptCallCodeStartRegister,
             MemOperand(kInterpreterDispatchTableRegister, r5));
  __ Call(kJavaScriptCallCodeStartRegister);

  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());

  // Any returns to the entry trampoline are either due to the return bytecode
  // or the interpreter tail calling a builtin and then a dispatch.

  // Get bytecode array and bytecode offset from the stack frame.
  __ LoadU64(kInterpreterBytecodeArrayRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ LoadU64(kInterpreterBytecodeOffsetRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  // Either return, or advance to the next bytecode and dispatch.
  Label do_return;
  __ LoadU8(r3, MemOperand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister));
  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister, r3, r4, r5,
                                &do_return);
  __ b(&do_dispatch);

  __ bind(&do_return);
  // The return value is in r2.
  LeaveInterpreterFrame(masm, r4, r6);
  __ Ret();

  __ bind(&stack_check_interrupt);
  // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
  // for the call to the StackGuard.
  __ mov(kInterpreterBytecodeOffsetRegister,
         Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
                              kFunctionEntryBytecodeOffset)));
  __ StoreU64(kInterpreterBytecodeOffsetRegister,
              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ CallRuntime(Runtime::kStackGuard);

  // After the call, restore the bytecode array, bytecode offset and accumulator
  // registers again. Also, restore the bytecode offset in the stack to its
  // previous value.
  __ LoadU64(kInterpreterBytecodeArrayRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ mov(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);

  __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
  __ StoreU64(r0,
              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

  __ jmp(&after_stack_check_interrupt);

  __ bind(&has_optimized_code_or_marker);
  MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
                                               feedback_vector);

  __ bind(&compile_lazy);
  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);

  __ bind(&stack_overflow);
  __ CallRuntime(Runtime::kThrowStackOverflow);
  __ bkpt(0);  // Should not return.
}

static void Generate_InterpreterPushArgs(MacroAssembler* masm,
                                         Register num_args,
                                         Register start_address,
                                         Register scratch) {
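  // start_address points at the first argument; the interpreter's register
  // file grows towards lower addresses, so step back to the last argument
  // before pushing the whole range.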
  __ SubS64(scratch, num_args, Operand(1));
  __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
  __ SubS64(start_address, start_address, scratch);
  // Push the arguments.
  __ PushArray(start_address, num_args, r1, scratch,
               TurboAssembler::PushArrayOrder::kReverse);
}

// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
    MacroAssembler* masm, ConvertReceiverMode receiver_mode,
    InterpreterPushArgsMode mode) {
  DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
  // ----------- S t a t e -------------
  //  -- r2 : the number of arguments (not including the receiver)
  //  -- r4 : the address of the first argument to be pushed. Subsequent
  //          arguments should be consecutive above this, in the same order as
  //          they are to be pushed onto the stack.
  //  -- r3 : the target to call (can be any Object).
  // -----------------------------------
  Label stack_overflow;
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // The spread argument should not be pushed.
    __ SubS64(r2, r2, Operand(1));
  }

  // Calculate the number of arguments (add one for the receiver).
  __ AddS64(r5, r2, Operand(1));
  __ StackOverflowCheck(r5, ip, &stack_overflow);

  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // Don't copy receiver. Argument count is correct.
    __ mov(r5, r2);
  }

  // Push the arguments.
  Generate_InterpreterPushArgs(masm, r5, r4, r6);
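  // For a null/undefined receiver the receiver slot was not pushed above, so
  // push the implicit undefined receiver explicitly.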

  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    __ PushRoot(RootIndex::kUndefinedValue);
  }

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Pass the spread in register r4.
    // r4 already points to the penultimate argument; the spread
    // lies in the next interpreter register.
    __ LoadU64(r4, MemOperand(r4, -kSystemPointerSize));
  }

  // Call the target.
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
            RelocInfo::CODE_TARGET);
  } else {
    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
            RelocInfo::CODE_TARGET);
  }

  __ bind(&stack_overflow);
  {
    __ TailCallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable Code.
    __ bkpt(0);
  }
}

// static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
    MacroAssembler* masm, InterpreterPushArgsMode mode) {
  // ----------- S t a t e -------------
  // -- r2 : argument count (not including receiver)
  // -- r5 : new target
  // -- r3 : constructor to call
  // -- r4 : allocation site feedback if available, undefined otherwise.
  // -- r6 : address of the first argument
  // -----------------------------------
  Label stack_overflow;
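  // Account for the extra stack slot that will hold the receiver to be
  // constructed.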
  __ AddS64(r7, r2, Operand(1));
  __ StackOverflowCheck(r7, ip, &stack_overflow);

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // The spread argument should not be pushed.
    __ SubS64(r2, r2, Operand(1));
  }

  // Push the arguments. r4 and r5 will be modified.
  Generate_InterpreterPushArgs(masm, r2, r6, r7);

  // Push a slot for the receiver to be constructed.
  __ mov(r0, Operand::Zero());
  __ push(r0);

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Pass the spread in register r4.
    // r6 already points to the penultimate argument; the spread
    // lies in the next interpreter register.
    __ lay(r6, MemOperand(r6, -kSystemPointerSize));
    __ LoadU64(r4, MemOperand(r6));
  } else {
    __ AssertUndefinedOrAllocationSite(r4, r7);
  }

  if (mode == InterpreterPushArgsMode::kArrayFunction) {
    __ AssertFunction(r3);

    // Tail call to the array construct stub (still in the caller
    // context at this point).
    Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
    __ Jump(code, RelocInfo::CODE_TARGET);
  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Call the constructor with r2, r3, and r5 unmodified.
    __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
            RelocInfo::CODE_TARGET);
  } else {
    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
    // Call the constructor with r2, r3, and r5 unmodified.
    __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
  }

  __ bind(&stack_overflow);
  {
    __ TailCallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable Code.
    __ bkpt(0);
  }
}

static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  // Set the return address to the correct point in the interpreter entry
  // trampoline.
  Label builtin_trampoline, trampoline_loaded;
  Smi interpreter_entry_return_pc_offset(
      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());

  // If the SFI function_data is an InterpreterData, the function will have a
  // custom copy of the interpreter entry trampoline for profiling. If so,
  // get the custom trampoline, otherwise grab the entry address of the global
  // trampoline.
  __ LoadU64(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ LoadTaggedPointerField(
      r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
      r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
  __ CompareObjectType(r4, kInterpreterDispatchTableRegister,
                       kInterpreterDispatchTableRegister,
                       INTERPRETER_DATA_TYPE);
  __ bne(&builtin_trampoline);

  __ LoadTaggedPointerField(
      r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
  __ AddS64(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ b(&trampoline_loaded);

  __ bind(&builtin_trampoline);
  __ Move(r4, ExternalReference::
                  address_of_interpreter_entry_trampoline_instruction_start(
                      masm->isolate()));
  __ LoadU64(r4, MemOperand(r4));

  __ bind(&trampoline_loaded);
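  // Set r14 (the return address) to the recorded return point inside the
  // interpreter entry trampoline.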
  __ AddS64(r14, r4, Operand(interpreter_entry_return_pc_offset.value()));

  // Initialize the dispatch table register.
  __ Move(
      kInterpreterDispatchTableRegister,
      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));

  // Get the bytecode array pointer from the frame.
  __ LoadU64(kInterpreterBytecodeArrayRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));

  if (FLAG_debug_code) {
    // Check function data field is actually a BytecodeArray object.
    __ TestIfSmi(kInterpreterBytecodeArrayRegister);
    __ Assert(
        ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
                         BYTECODE_ARRAY_TYPE);
    __ Assert(
        eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
  }

  // Get the target bytecode offset from the frame.
  __ LoadU64(kInterpreterBytecodeOffsetRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  if (FLAG_debug_code) {
    Label okay;
    __ CmpS64(kInterpreterBytecodeOffsetRegister,
              Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
    __ bge(&okay);
    __ bkpt(0);
    __ bind(&okay);
  }

  // Dispatch to the target bytecode.
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  __ LoadU8(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister));
  __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
  __ LoadU64(kJavaScriptCallCodeStartRegister,
             MemOperand(kInterpreterDispatchTableRegister, scratch));
  __ Jump(kJavaScriptCallCodeStartRegister);
}

void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
  // Get bytecode array and bytecode offset from the stack frame.
  __ LoadU64(kInterpreterBytecodeArrayRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ LoadU64(kInterpreterBytecodeOffsetRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  Label enter_bytecode, function_entry_bytecode;
  __ CmpS64(kInterpreterBytecodeOffsetRegister,
            Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
                    kFunctionEntryBytecodeOffset));
  __ beq(&function_entry_bytecode);

  // Load the current bytecode.
  __ LoadU8(r3, MemOperand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister));

  // Advance to the next bytecode.
  Label if_return;
  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister, r3, r4, r5,
                                &if_return);

  __ bind(&enter_bytecode);
  // Convert the new bytecode offset to a Smi and save it in the stack frame.
  __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
  __ StoreU64(r4,
              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

  Generate_InterpreterEnterBytecode(masm);

  __ bind(&function_entry_bytecode);
  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset. Detect this case and advance to the first
  // actual bytecode.
  __ mov(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
  __ b(&enter_bytecode);

  // We should never take the if_return path.
  __ bind(&if_return);
  __ Abort(AbortReason::kInvalidBytecodeAdvance);
}

void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
  Generate_InterpreterEnterBytecode(masm);
}

namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
                                      bool java_script_builtin,
                                      bool with_result) {
  const RegisterConfiguration* config(RegisterConfiguration::Default());
  int allocatable_register_count = config->num_allocatable_general_registers();
  Register scratch = ip;
  if (with_result) {
    if (java_script_builtin) {
      __ mov(scratch, r2);
    } else {
      // Overwrite the hole inserted by the deoptimizer with the return value
      // from the LAZY deopt point.
      __ StoreU64(
          r2, MemOperand(
                  sp, config->num_allocatable_general_registers() *
                              kSystemPointerSize +
                          BuiltinContinuationFrameConstants::kFixedFrameSize));
    }
  }
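  // Restore the allocatable registers that were saved in the continuation
  // frame; the argument count register was stored as a Smi and is untagged
  // for JavaScript builtins.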
  for (int i = allocatable_register_count - 1; i >= 0; --i) {
    int code = config->GetAllocatableGeneralCode(i);
    __ Pop(Register::from_code(code));
    if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
      __ SmiUntag(Register::from_code(code));
    }
  }
  if (java_script_builtin && with_result) {
    // Overwrite the hole inserted by the deoptimizer with the return value from
    // the LAZY deopt point. r2 contains the arguments count, the return value
    // from LAZY is always the last argument.
    __ AddS64(r2, r2,
              Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
    __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
    __ StoreU64(scratch, MemOperand(sp, r1));
    // Recover arguments count.
    __ SubS64(r2, r2,
              Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
  }
  __ LoadU64(
      fp,
      MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
  // Load builtin index (stored as a Smi) and use it to get the builtin start
  // address from the builtins table.
  UseScratchRegisterScope temps(masm);
  Register builtin = temps.Acquire();
  __ Pop(builtin);
  __ AddS64(sp, sp,
            Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
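  // Restore the saved return address into the link register (r14).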
  __ Pop(r0);
  __ mov(r14, r0);
  __ LoadEntryFromBuiltinIndex(builtin);
  __ Jump(builtin);
}
}  // namespace

void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, false, false);
}

void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
    MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, false, true);
}

void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, true, false);
}

void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
    MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, true, true);
}

void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kNotifyDeoptimized);
  }

  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r2.code());
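  // Restore the interpreter accumulator (r2) from the stack and return.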
  __ pop(r2);
  __ Ret();
}

void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
  }

  // If the code object is null, just return to the caller.
  Label skip;
  __ CmpSmiLiteral(r2, Smi::zero(), r0);
  __ bne(&skip);
  __ Ret();

  __ bind(&skip);

  // Drop the handler frame that is sitting on top of the actual
  // JavaScript frame. This is the case when OSR is triggered from bytecode.
  __ LeaveFrame(StackFrame::STUB);

  // Load deoptimization data from the code object.
  // <deopt_data> = <code>[#deoptimization_data_offset]
  __ LoadTaggedPointerField(
      r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));

  // Load the OSR entrypoint offset from the deoptimization data.
  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
  __ SmiUntagField(
      r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
                                  DeoptimizationData::kOsrPcOffsetIndex)));

  // Compute the target address = code_obj + header_size + osr_offset
  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
  __ AddS64(r2, r3);
  __ AddS64(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ mov(r14, r0);

  // And "return" to the OSR entry point of the function.
  __ Ret();
}

// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : argc
  //  -- sp[0] : receiver
  //  -- sp[4] : thisArg
  //  -- sp[8] : argArray
  // -----------------------------------

  // 1. Load receiver into r3, argArray into r4 (if present), remove all
  // arguments from the stack (including the receiver), and push thisArg (if
  // present) instead.
  {
    __ LoadRoot(r7, RootIndex::kUndefinedValue);
    __ mov(r4, r7);
    Label done;

    __ LoadU64(r3, MemOperand(sp));  // receiver
    __ cghi(r2, Operand(1));
    __ blt(&done);
    __ LoadU64(r7, MemOperand(sp, kSystemPointerSize));  // thisArg
    __ cghi(r2, Operand(2));
    __ blt(&done);
    __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize));  // argArray

    __ bind(&done);
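    // Drop the arguments from the stack, leaving one slot, and store thisArg
    // there as the receiver for the call below.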
    __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
    __ lay(sp, MemOperand(sp, r1));
    __ StoreU64(r7, MemOperand(sp));
  }

  // ----------- S t a t e -------------
  //  -- r4    : argArray
  //  -- r3    : receiver
  //  -- sp[0] : thisArg
  // -----------------------------------

  // 2. We don't need to check explicitly for callable receiver here,
  // since that's the first thing the Call/CallWithArrayLike builtins
  // will do.

  // 3. Tail call with no arguments if argArray is null or undefined.
  Label no_arguments;
  __ JumpIfRoot(r4, RootIndex::kNullValue, &no_arguments);
  __ JumpIfRoot(r4, RootIndex::kUndefinedValue, &no_arguments);

  // 4a. Apply the receiver to the given argArray.
  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
          RelocInfo::CODE_TARGET);

  // 4b. The argArray is either null or undefined, so we tail call without any
  // arguments to the receiver.
  __ bind(&no_arguments);
  {
    __ mov(r2, Operand::Zero());
    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
  }
}

// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
  // 1. Get the callable to call (passed as receiver) from the stack.
  __ Pop(r3);

  // 2. Make sure we have at least one argument.
  // r2: actual number of arguments
  {
    Label done;
    __ cghi(r2, Operand::Zero());
    __ b(ne, &done);
    __ PushRoot(RootIndex::kUndefinedValue);
    __ AddS64(r2, r2, Operand(1));
    __ bind(&done);
  }

  // 3. Adjust the actual number of arguments.
  __ SubS64(r2, r2, Operand(1));

  // 4. Call the callable.
  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}

void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2     : argc
  //  -- sp[0]  : receiver
  //  -- sp[4]  : target         (if argc >= 1)
  //  -- sp[8]  : thisArgument   (if argc >= 2)
  //  -- sp[12] : argumentsList  (if argc == 3)
  // -----------------------------------

  // 1. Load target into r3 (if present), argumentsList into r4 (if present),
  // remove all arguments from the stack (including the receiver), and push
  // thisArgument (if present) instead.
  {
    __ LoadRoot(r3, RootIndex::kUndefinedValue);
    __ mov(r7, r3);
    __ mov(r4, r3);

    Label done;

    __ cghi(r2, Operand(1));
    __ blt(&done);
    __ LoadU64(r3, MemOperand(sp, kSystemPointerSize));  // target
    __ cghi(r2, Operand(2));
    __ blt(&done);
    __ LoadU64(r7, MemOperand(sp, 2 * kSystemPointerSize));  // thisArgument
    __ cghi(r2, Operand(3));
    __ blt(&done);
    __ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize));  // argumentsList

    __ bind(&done);
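    // Drop the arguments from the stack, leaving one slot, and store
    // thisArgument there as the receiver for the call below.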
    __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
    __ lay(sp, MemOperand(sp, r1));
    __ StoreU64(r7, MemOperand(sp));
  }

  // ----------- S t a t e -------------
  //  -- r4    : argumentsList
  //  -- r3    : target
  //  -- sp[0] : thisArgument
  // -----------------------------------

  // 2. We don't need to check explicitly for callable target here,
  // since that's the first thing the Call/CallWithArrayLike builtins
  // will do.

  // 3 Apply the target to the given argumentsList.
  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
          RelocInfo::CODE_TARGET);
}

void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2     : argc
  //  -- sp[0]  : receiver
  //  -- sp[4]  : target
  //  -- sp[8]  : argumentsList
  //  -- sp[12] : new.target (optional)
  // -----------------------------------

  // 1. Load target into r3 (if present), argumentsList into r4 (if present),
  // new.target into r5 (if present, otherwise use target), remove all
  // arguments from the stack (including the receiver), and push undefined as
  // the receiver instead.
  {
    __ LoadRoot(r3, RootIndex::kUndefinedValue);
    __ mov(r4, r3);

    Label done;

    __ mov(r6, r3);
    __ cghi(r2, Operand(1));
    __ blt(&done);
    __ LoadU64(r3, MemOperand(sp, kSystemPointerSize));  // target
    __ mov(r5, r3);
    __ cghi(r2, Operand(2));
    __ blt(&done);
    __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize));  // argumentsList
    __ cghi(r2, Operand(3));
    __ blt(&done);
    __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize));  // new.target
    __ bind(&done);
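    // Drop the arguments from the stack, leaving one slot, and store undefined
    // there as the receiver.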
    __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
    __ lay(sp, MemOperand(sp, r1));
    __ StoreU64(r6, MemOperand(sp));
  }

  // ----------- S t a t e -------------
  //  -- r4    : argumentsList
  //  -- r5    : new.target
  //  -- r3    : target
  //  -- sp[0] : receiver (undefined)
  // -----------------------------------

  // 2. We don't need to check explicitly for constructor target here,
  // since that's the first thing the Construct/ConstructWithArrayLike
  // builtins will do.

  // 3. We don't need to check explicitly for constructor new.target here,
  // since that's the second thing the Construct/ConstructWithArrayLike
  // builtins will do.

  // 4. Construct the target with the given new.target and argumentsList.
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
          RelocInfo::CODE_TARGET);
}

// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                               Handle<Code> code) {
  // ----------- S t a t e -------------
  //  -- r3 : target
  //  -- r2 : number of parameters on the stack (not including the receiver)
  //  -- r4 : arguments list (a FixedArray)
  //  -- r6 : len (number of elements to push from args)
  //  -- r5 : new.target (for [[Construct]])
  // -----------------------------------

  Register scratch = ip;

  if (FLAG_debug_code) {
    // Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
    Label ok, fail;
    __ AssertNotSmi(r4);
    __ LoadTaggedPointerField(scratch,
                              FieldMemOperand(r4, HeapObject::kMapOffset));
    __ LoadS16(scratch,
               FieldMemOperand(scratch, Map::kInstanceTypeOffset));
    __ CmpS64(scratch, Operand(FIXED_ARRAY_TYPE));
    __ beq(&ok);
    __ CmpS64(scratch, Operand(FIXED_DOUBLE_ARRAY_TYPE));
    __ bne(&fail);
    __ CmpS64(r6, Operand::Zero());
    __ beq(&ok);
    // Fall through.
    __ bind(&fail);
    __ Abort(AbortReason::kOperandIsNotAFixedArray);

    __ bind(&ok);
  }

  // Check for stack overflow.
  Label stack_overflow;
  __ StackOverflowCheck(r6, scratch, &stack_overflow);

  // Move the arguments already in the stack, including the receiver.
  {
    Label copy, check;
    Register num = ip, src = r8, dest = r7;
    __ mov(src, sp);
    __ ShiftLeftU64(r1, r6, Operand(kSystemPointerSizeLog2));
    __ SubS64(sp, sp, r1);
    // Update stack pointer.
    __ mov(dest, sp);
    __ ltgr(num, r2);
    __ b(&check);
    __ bind(&copy);
    __ LoadU64(r0, MemOperand(src));
    __ lay(src, MemOperand(src, kSystemPointerSize));
    __ StoreU64(r0, MemOperand(dest));
    __ lay(dest, MemOperand(dest, kSystemPointerSize));
    __ SubS64(num, num, Operand(1));
    __ bind(&check);
    __ b(ge, &copy);
  }

  // Push arguments onto the stack (thisArgument is already on the stack).
  {
    Label loop, no_args, skip;
    __ CmpS64(r6, Operand::Zero());
    __ beq(&no_args);
    __ AddS64(r4, r4,
              Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
    __ mov(r1, r6);
    __ bind(&loop);
    __ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
    __ la(r4, MemOperand(r4, kTaggedSize));
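    // Turn hole values into undefined as the elements are copied to the stack.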
    __ CompareRoot(scratch, RootIndex::kTheHoleValue);
    __ bne(&skip, Label::kNear);
    __ LoadRoot(scratch, RootIndex::kUndefinedValue);
    __ bind(&skip);
    __ StoreU64(scratch, MemOperand(r7));
    __ lay(r7, MemOperand(r7, kSystemPointerSize));
    __ BranchOnCount(r1, &loop);
    __ bind(&no_args);
    __ AddS64(r2, r2, r6);
  }

  // Tail-call to the actual Call or Construct builtin.
  __ Jump(code, RelocInfo::CODE_TARGET);

  __ bind(&stack_overflow);
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
}

// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
                                                      CallOrConstructMode mode,
                                                      Handle<Code> code) {
  // ----------- S t a t e -------------
  //  -- r2 : the number of arguments (not including the receiver)
  //  -- r5 : the new.target (for [[Construct]] calls)
  //  -- r3 : the target to call (can be any Object)
  //  -- r4 : start index (to support rest parameters)
  // -----------------------------------

  Register scratch = r8;

  if (mode == CallOrConstructMode::kConstruct) {
    Label new_target_constructor, new_target_not_constructor;
    __ JumpIfSmi(r5, &new_target_not_constructor);
    __ LoadTaggedPointerField(scratch,
                              FieldMemOperand(r5, HeapObject::kMapOffset));
    __ LoadU8(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tmll(scratch, Operand(Map::Bits1::IsConstructorBit::kShift));
    __ bne(&new_target_constructor);
    __ bind(&new_target_not_constructor);
    {
      FrameScope scope(masm, StackFrame::MANUAL);
      __ EnterFrame(StackFrame::INTERNAL);
      __ Push(r5);
      __ CallRuntime(Runtime::kThrowNotConstructor);
    }
    __ bind(&new_target_constructor);
  }

  Label stack_done, stack_overflow;
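  // Determine how many caller arguments to forward: the caller's argument
  // count minus the start index; skip the copy if there are none.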
  __ LoadU64(r7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ SubS64(r7, r7, r4);
  __ ble(&stack_done);
  {
    // ----------- S t a t e -------------
    //  -- r2 : the number of arguments already in the stack (not including the
    //  receiver)
    //  -- r3 : the target to call (can be any Object)
    //  -- r4 : start index (to support rest parameters)
    //  -- r5 : the new.target (for [[Construct]] calls)
    //  -- r6 : points to the caller stack frame
    //  -- r7 : number of arguments to copy, i.e. arguments count - start index
    // -----------------------------------

    // Check for stack overflow.
    __ StackOverflowCheck(r7, scratch, &stack_overflow);

    // Forward the arguments from the caller frame.
    __ mov(r5, r5);
    // Point to the first argument to copy (skipping the receiver).
    __ AddS64(r6, fp,
              Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
                      kSystemPointerSize));
    __ ShiftLeftU64(scratch, r4, Operand(kSystemPointerSizeLog2));
    __ AddS64(r6, r6, scratch);

    // Move the arguments already in the stack, including the receiver.
    {
      Label copy, check;
      Register num = r1, src = ip,
               dest = r4;  // r7 and r10 are context and root.
      __ mov(src, sp);
      // Update stack pointer.
      __ ShiftLeftU64(scratch, r7, Operand(kSystemPointerSizeLog2));
      __ SubS64(sp, sp, scratch);
      __ mov(dest, sp);
      __ ltgr(num, r2);
      __ b(&check);
      __ bind(&copy);
      __ LoadU64(r0, MemOperand(src));
      __ lay(src, MemOperand(src, kSystemPointerSize));
      __ StoreU64(r0, MemOperand(dest));
      __ lay(dest, MemOperand(dest, kSystemPointerSize));
      __ SubS64(num, num, Operand(1));
      __ bind(&check);
      __ b(ge, &copy);
    }

    // Copy arguments from the caller frame.
    // TODO(victorgomes): Consider using forward order as potentially more cache
    // friendly.
    {
      Label loop;
      __ AddS64(r2, r2, r7);
      __ bind(&loop);
      {
        __ SubS64(r7, r7, Operand(1));
        __ ShiftLeftU64(r1, r7, Operand(kSystemPointerSizeLog2));
        __ LoadU64(scratch, MemOperand(r6, r1));
        __ StoreU64(scratch, MemOperand(r4, r1));
        __ CmpS64(r7, Operand::Zero());
        __ bne(&loop);
      }
    }
  }
  __ b(&stack_done);
  __ bind(&stack_overflow);
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
  __ bind(&stack_done);

  // Tail-call to the {code} handler.
  __ Jump(code, RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
                                     ConvertReceiverMode mode) {
  // ----------- S t a t e -------------
  //  -- r2 : the number of arguments (not including the receiver)
  //  -- r3 : the function to call (checked to be a JSFunction)
  // -----------------------------------
  __ AssertFunction(r3);

  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
  // Check that the function is not a "classConstructor".
  Label class_constructor;
  __ LoadTaggedPointerField(
      r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
  __ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
  __ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
  __ bne(&class_constructor);

  // Enter the context of the function; ToObject has to run in the function
  // context, and we also need to take the global proxy from the function
  // context in case of conversion.
  __ LoadTaggedPointerField(cp,
                            FieldMemOperand(r3, JSFunction::kContextOffset));
  // We need to convert the receiver for non-native sloppy mode functions.
  Label done_convert;
  __ AndP(r0, r5,
          Operand(SharedFunctionInfo::IsStrictBit::kMask |
                  SharedFunctionInfo::IsNativeBit::kMask));
  __ bne(&done_convert);
  {
    // ----------- S t a t e -------------
    //  -- r2 : the number of arguments (not including the receiver)
    //  -- r3 : the function to call (checked to be a JSFunction)
    //  -- r4 : the shared function info.
    //  -- cp : the function context.
    // -----------------------------------

    if (mode == ConvertReceiverMode::kNullOrUndefined) {
      // Patch receiver to global proxy.
      __ LoadGlobalProxy(r5);
    } else {
      Label convert_to_object, convert_receiver;
      __ LoadReceiver(r5, r2);
      __ JumpIfSmi(r5, &convert_to_object);
      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
      __ CompareObjectType(r5, r6, r6, FIRST_JS_RECEIVER_TYPE);
      __ bge(&done_convert);
      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
        Label convert_global_proxy;
        __ JumpIfRoot(r5, RootIndex::kUndefinedValue, &convert_global_proxy);
        __ JumpIfNotRoot(r5, RootIndex::kNullValue, &convert_to_object);
        __ bind(&convert_global_proxy);
        {
          // Patch receiver to global proxy.
          __ LoadGlobalProxy(r5);
        }
        __ b(&convert_receiver);
      }
      __ bind(&convert_to_object);
      {
        // Convert receiver using ToObject.
        // TODO(bmeurer): Inline the allocation here to avoid building the frame
        // in the fast case? (fall back to AllocateInNewSpace?)
        FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
        __ SmiTag(r2);
        __ Push(r2, r3);
        __ mov(r2, r5);
        __ Push(cp);
        __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
                RelocInfo::CODE_TARGET);
        __ Pop(cp);
        __ mov(r5, r2);
        __ Pop(r2, r3);
        __ SmiUntag(r2);
      }
      __ LoadTaggedPointerField(
          r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
      __ bind(&convert_receiver);
    }
    __ StoreReceiver(r5, r2, r6);
  }
  __ bind(&done_convert);

  // ----------- S t a t e -------------
  //  -- r2 : the number of arguments (not including the receiver)
  //  -- r3 : the function to call (checked to be a JSFunction)
  //  -- r4 : the shared function info.
  //  -- cp : the function context.
  // -----------------------------------

  __ LoadU16(
      r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
  __ InvokeFunctionCode(r3, no_reg, r4, r2, InvokeType::kJump);

  // The function is a "classConstructor", need to raise an exception.
  __ bind(&class_constructor);
  {
    FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
    __ push(r3);
    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
  }
}

namespace {

void Generate_PushBoundArguments(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2 : the number of arguments (not including the receiver)
  //  -- r3 : target (checked to be a JSBoundFunction)
  //  -- r5 : new.target (only in case of [[Construct]])
  // -----------------------------------

  // Load [[BoundArguments]] into r4 and length of that into r6.
  Label no_bound_arguments;
  __ LoadTaggedPointerField(
      r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
  __ SmiUntagField(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
  __ LoadAndTestP(r6, r6);
  __ beq(&no_bound_arguments);
  {
    // ----------- S t a t e -------------
    //  -- r2 : the number of arguments (not including the receiver)
    //  -- r3 : target (checked to be a JSBoundFunction)
    //  -- r4 : the [[BoundArguments]] (implemented as FixedArray)
    //  -- r5 : new.target (only in case of [[Construct]])
    //  -- r6 : the number of [[BoundArguments]]
    // -----------------------------------

    Register scratch = r8;
    // Reserve stack space for the [[BoundArguments]].
    {
      Label done;
      __ ShiftLeftU64(r9, r6, Operand(kSystemPointerSizeLog2));
      __ SubS64(r1, sp, r9);
      // Check the stack for overflow. We are not trying to catch interruptions
      // (i.e. debug break and preemption) here, so check the "real stack
      // limit".
      __ CmpU64(r1, __ StackLimitAsMemOperand(StackLimitKind::kRealStackLimit));
      __ bgt(&done);  // Signed comparison.
      // Restore the stack pointer.
      {
        FrameScope scope(masm, StackFrame::MANUAL);
        __ EnterFrame(StackFrame::INTERNAL);
        __ CallRuntime(Runtime::kThrowStackOverflow);
      }
      __ bind(&done);
    }

    // Pop receiver.
    __ Pop(r7);

    // Push [[BoundArguments]].
    {
      Label loop, done;
      __ AddS64(r2, r2, r6);  // Adjust effective number of arguments.
      __ AddS64(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));

      __ bind(&loop);
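      // Load the bound arguments starting from the last one, so that the first
      // bound argument ends up closest to the top of the stack.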
      __ SubS64(r1, r6, Operand(1));
      __ ShiftLeftU64(r1, r1, Operand(kTaggedSizeLog2));
      __ LoadAnyTaggedField(scratch, MemOperand(r4, r1), r0);
      __ Push(scratch);
      __ SubS64(r6, r6, Operand(1));
      __ bgt(&loop);
      __ bind(&done);
    }

    // Push receiver.
    __ Push(r7);
  }
  __ bind(&no_bound_arguments);
}

}  // namespace

// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2 : the number of arguments (not including the receiver)
  //  -- r3 : the function to call (checked to be a JSBoundFunction)
  // -----------------------------------
  __ AssertBoundFunction(r3);

  // Patch the receiver to [[BoundThis]].
  __ LoadAnyTaggedField(r5,
                        FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
  __ StoreReceiver(r5, r2, r1);

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Call the [[BoundTargetFunction]] via the Call builtin.
  __ LoadTaggedPointerField(
      r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
          RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
  // ----------- S t a t e -------------
  //  -- r2 : the number of arguments (not including the receiver)
  //  -- r3 : the target to call (can be any Object).
  // -----------------------------------

  Label non_callable, non_smi;
  __ JumpIfSmi(r3, &non_callable);
  __ bind(&non_smi);
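  // Dispatch on the instance type: JSFunctions go through CallFunction,
  // bound functions through CallBoundFunction.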
  __ LoadMap(r6, r3);
  __ CompareInstanceTypeRange(r6, r7, FIRST_JS_FUNCTION_TYPE,
                              LAST_JS_FUNCTION_TYPE);
  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
          RelocInfo::CODE_TARGET, le);
  __ CmpS64(r7, Operand(JS_BOUND_FUNCTION_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
          RelocInfo::CODE_TARGET, eq);

  // Check if target has a [[Call]] internal method.
  __ LoadU8(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
  __ TestBit(r6, Map::Bits1::IsCallableBit::kShift);
  __ beq(&non_callable);

  // Check if the target is a proxy and, if so, tail call the CallProxy builtin.
  __ CmpS64(r7, Operand(JS_PROXY_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);

  // 2. Call to something else, which might have a [[Call]] internal method (if
  // not we raise an exception).
  // Overwrite the original receiver with the (original) target.
  __ StoreReceiver(r3, r2, r7);
  // Let the "call_as_function_delegate" take care of the rest.
  __ LoadNativeContextSlot(r3, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
  __ Jump(masm->isolate()->builtins()->CallFunction(
              ConvertReceiverMode::kNotNullOrUndefined),
          RelocInfo::CODE_TARGET);

  // 3. Call to something that is not callable.
  __ bind(&non_callable);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(r3);
    __ CallRuntime(Runtime::kThrowCalledNonCallable);
  }
}

// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2 : the number of arguments (not including the receiver)
  //  -- r3 : the constructor to call (checked to be a JSFunction)
  //  -- r5 : the new target (checked to be a constructor)
  // -----------------------------------
  __ AssertConstructor(r3, r1);
  __ AssertFunction(r3);

  // Calling convention for function specific ConstructStubs require
  // r4 to contain either an AllocationSite or undefined.
  __ LoadRoot(r4, RootIndex::kUndefinedValue);

  Label call_generic_stub;

  // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
  __ LoadTaggedPointerField(
      r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
  __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
  __ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
  __ beq(&call_generic_stub);

  __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
          RelocInfo::CODE_TARGET);

  __ bind(&call_generic_stub);
  __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
          RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2 : the number of arguments (not including the receiver)
  //  -- r3 : the function to call (checked to be a JSBoundFunction)
  //  -- r5 : the new target (checked to be a constructor)
  // -----------------------------------
  __ AssertConstructor(r3, r1);
  __ AssertBoundFunction(r3);

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
  Label skip;
  __ CompareTagged(r3, r5);
  __ bne(&skip);
  __ LoadTaggedPointerField(
      r5, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
  __ bind(&skip);

  // Construct the [[BoundTargetFunction]] via the Construct builtin.
  __ LoadTaggedPointerField(
      r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2 : the number of arguments (not including the receiver)
  //  -- r3 : the constructor to call (can be any Object)
  //  -- r5 : the new target (either the same as the constructor or
  //          the JSFunction on which new was invoked initially)
  // -----------------------------------

  // Check if target is a Smi.
  Label non_constructor, non_proxy;
  __ JumpIfSmi(r3, &non_constructor);

  // Check if target has a [[Construct]] internal method.
  __ LoadTaggedPointerField(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadU8(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
  __ TestBit(r4, Map::Bits1::IsConstructorBit::kShift);
  __ beq(&non_constructor);

  // Dispatch based on instance type.
  __ CompareInstanceTypeRange(r6, r7, FIRST_JS_FUNCTION_TYPE,
                              LAST_JS_FUNCTION_TYPE);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
          RelocInfo::CODE_TARGET, le);

  // Only dispatch to bound functions after checking whether they are
  // constructors.
  __ CmpS64(r7, Operand(JS_BOUND_FUNCTION_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
          RelocInfo::CODE_TARGET, eq);

  // Only dispatch to proxies after checking whether they are constructors.
  __ CmpS64(r7, Operand(JS_PROXY_TYPE));
  __ bne(&non_proxy);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
          RelocInfo::CODE_TARGET);

  // Called Construct on an exotic Object with a [[Construct]] internal method.
  __ bind(&non_proxy);
  {
    // Overwrite the original receiver with the (original) target.
    __ StoreReceiver(r3, r2, r7);
    // Let the "call_as_constructor_delegate" take care of the rest.
    __ LoadNativeContextSlot(r3, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
    __ Jump(masm->isolate()->builtins()->CallFunction(),
            RelocInfo::CODE_TARGET);
  }

  // Called Construct on an Object that doesn't have a [[Construct]] internal
  // method.
  __ bind(&non_constructor);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
          RelocInfo::CODE_TARGET);
}

#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  // The function index was put in a register by the jump table trampoline.
  // Convert to Smi for the runtime call.
  __ SmiTag(kWasmCompileLazyFuncIndexRegister,
            kWasmCompileLazyFuncIndexRegister);
  {
    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
    FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);

    // Save all parameter registers (see wasm-linkage.h). They might be
    // overwritten in the runtime call below. We don't have any callee-saved
    // registers in wasm, so no need to store anything else.
    RegList gp_regs = 0;
    for (Register gp_param_reg : wasm::kGpParamRegisters) {
      gp_regs |= gp_param_reg.bit();
    }

    RegList fp_regs = 0;
    for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
      fp_regs |= fp_param_reg.bit();
    }

    CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
    CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
             NumRegs(gp_regs));
    CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
             NumRegs(fp_regs));

    __ MultiPush(gp_regs);
    __ MultiPushF64OrV128(fp_regs);

    // Pass instance and function index as explicit arguments to the runtime
    // function.
    __ Push(kWasmInstanceRegister, r7);
    // Initialize the JavaScript context with 0. CEntry will use it to
    // set the current context on the isolate.
    __ LoadSmiLiteral(cp, Smi::zero());
    __ CallRuntime(Runtime::kWasmCompileLazy, 2);
    // The entrypoint address is the return value.
    __ mov(ip, r2);

    // Restore registers.
    __ MultiPopF64OrV128(fp_regs);
    __ MultiPop(gp_regs);
  }
  // Finally, jump to the entrypoint.
  __ Jump(ip);
}

void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
  HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);

    // Save all parameter registers. They might hold live values, we restore
    // them after the runtime call.
    __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
    __ MultiPushDoubles(WasmDebugBreakFrameConstants::kPushedFpRegs);

    // Initialize the JavaScript context with 0. CEntry will use it to
    // set the current context on the isolate.
    __ LoadSmiLiteral(cp, Smi::zero());
    __ CallRuntime(Runtime::kWasmDebugBreak, 0);

    // Restore registers.
    __ MultiPopDoubles(WasmDebugBreakFrameConstants::kPushedFpRegs);
    __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
  }
  __ Ret();
}

void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
  // TODO(v8:10701): Implement for this platform.
  __ Trap();
}

void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
  // Only needed on x64.
  __ Trap();
}
#endif  // V8_ENABLE_WEBASSEMBLY

void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
                               SaveFPRegsMode save_doubles, ArgvMode argv_mode,
                               bool builtin_exit_frame) {
  // Called from JavaScript; parameters are on stack as if calling JS function.
  // r2: number of arguments including receiver
  // r3: pointer to builtin function
  // fp: frame pointer  (restored after C call)
  // sp: stack pointer  (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_mode == ArgvMode::kRegister:
  // r4: pointer to the first argument

  __ mov(r7, r3);

  if (argv_mode == ArgvMode::kRegister) {
    // Move argv into the correct register.
    __ mov(r3, r4);
  } else {
    // Compute the argv pointer.
    __ ShiftLeftU64(r3, r2, Operand(kSystemPointerSizeLog2));
    __ lay(r3, MemOperand(r3, sp, -kSystemPointerSize));
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);

  // Need at least one extra slot for return address location.
  int arg_stack_space = 1;

  // Pass buffer for return value on stack if necessary
  bool needs_return_buffer =
      result_size == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS;
  if (needs_return_buffer) {
    arg_stack_space += result_size;
  }

#if V8_TARGET_ARCH_S390X
  // 64-bit Linux passes the Argument object by reference, not by value.
  arg_stack_space += 2;
#endif

  __ EnterExitFrame(
      save_doubles == SaveFPRegsMode::kSave, arg_stack_space,
      builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);

  // Store a copy of argc, argv in callee-saved registers for later.
  __ mov(r6, r2);
  __ mov(r8, r3);
  // r2, r6: number of arguments including receiver  (C callee-saved)
  // r3, r8: pointer to the first argument
  // r7: pointer to builtin function  (C callee-saved)

  // Result returned in registers or stack, depending on result size and ABI.

  Register isolate_reg = r4;
  if (needs_return_buffer) {
    // The return value is a 16-byte non-scalar value.
    // Use frame storage reserved by calling function to pass return
    // buffer as implicit first argument in R2.  Shift original parameters
    // by one register each.
    __ mov(r4, r3);
    __ mov(r3, r2);
    __ la(r2,
          MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
    isolate_reg = r5;
    // Clang doesn't preserve r2 (the result buffer), so save it to r8
    // (preserved) before the call.
    __ mov(r8, r2);
  }
  // Call C built-in.
  __ Move(isolate_reg, ExternalReference::isolate_address(masm->isolate()));

  __ StoreReturnAddressAndCall(r7);

  // If return value is on the stack, pop it to registers.
  if (needs_return_buffer) {
    __ mov(r2, r8);
    __ LoadU64(r3, MemOperand(r2, kSystemPointerSize));
    __ LoadU64(r2, MemOperand(r2));
  }

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(r2, RootIndex::kException);
  __ beq(&exception_returned, Label::kNear);

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address = ExternalReference::Create(
        IsolateAddressId::kPendingExceptionAddress, masm->isolate());
    __ Move(r1, pending_exception_address);
    __ LoadU64(r1, MemOperand(r1));
    __ CompareRoot(r1, RootIndex::kTheHoleValue);
    // Cannot use Check here, as it attempts to generate a call into the
    // runtime.
    __ beq(&okay, Label::kNear);
    __ stop();
    __ bind(&okay);
  }

  // Exit C frame and return.
  // r2:r3: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc = argv_mode == ArgvMode::kRegister
                      // We don't want to pop arguments so set argc to no_reg.
                      ? no_reg
                      // r6: still holds argc (callee-saved).
                      : r6;
  __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
  __ b(r14);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
  ExternalReference pending_handler_entrypoint_address =
      ExternalReference::Create(
          IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
  ExternalReference pending_handler_fp_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
  ExternalReference pending_handler_sp_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());

  // Ask the runtime for help to determine the handler. This will set r3 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler =
      ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, r2);
    __ mov(r2, Operand::Zero());
    __ mov(r3, Operand::Zero());
    __ Move(r4, ExternalReference::isolate_address(masm->isolate()));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ Move(cp, pending_handler_context_address);
  __ LoadU64(cp, MemOperand(cp));
  __ Move(sp, pending_handler_sp_address);
  __ LoadU64(sp, MemOperand(sp));
  __ Move(fp, pending_handler_fp_address);
  __ LoadU64(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label skip;
  __ CmpS64(cp, Operand::Zero());
  __ beq(&skip, Label::kNear);
  __ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&skip);

  // Reset the masking register. This is done independent of the underlying
  // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
  // with both configurations. It is safe to always do this, because the
  // underlying register is caller-saved and can be arbitrarily clobbered.
  __ ResetSpeculationPoisonRegister();

  // Clear c_entry_fp, like we do in `LeaveExitFrame`.
  {
    UseScratchRegisterScope temps(masm);
    __ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                          masm->isolate()));
    __ mov(r0, Operand::Zero());
    __ StoreU64(r0, MemOperand(r1));
  }

  // Compute the handler entry address and jump to it.
  __ Move(r3, pending_handler_entrypoint_address);
  __ LoadU64(r3, MemOperand(r3));
  __ Jump(r3);
}

void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done, fastpath_done;
  Register result_reg = r2;

  HardAbortScope hard_abort(masm);  // Avoid calls to Abort.

  // Immediate values for this stub fit in instructions, so it's safe to use ip.
  Register scratch = GetRegisterThatIsNotOneOf(result_reg);
  Register scratch_low = GetRegisterThatIsNotOneOf(result_reg, scratch);
  Register scratch_high =
      GetRegisterThatIsNotOneOf(result_reg, scratch, scratch_low);
  DoubleRegister double_scratch = kScratchDoubleReg;

  __ Push(result_reg, scratch);
  // Account for saved regs.
  int argument_offset = 2 * kSystemPointerSize;

  // Load double input.
  __ LoadF64(double_scratch, MemOperand(sp, argument_offset));

  // Do fast-path convert from double to int.
  __ ConvertDoubleToInt64(result_reg, double_scratch);

  // Test for overflow
  __ TestIfInt32(result_reg);
  __ beq(&fastpath_done, Label::kNear);
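  // Slow path: the converted value does not fit in an int32, so compute the
  // truncated low 32 bits directly from the IEEE-754 exponent and mantissa.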

  __ Push(scratch_high, scratch_low);
  // Account for saved regs.
  argument_offset += 2 * kSystemPointerSize;

  __ LoadU32(scratch_high,
             MemOperand(sp, argument_offset + Register::kExponentOffset));
  __ LoadU32(scratch_low,
             MemOperand(sp, argument_offset + Register::kMantissaOffset));

  __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
  // Load scratch with exponent - 1. This is faster than loading
  // with exponent because Bias + 1 = 1024 which is a *S390* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ SubS64(scratch, Operand(HeapNumber::kExponentBias + 1));
  // If exponent is greater than or equal to 84, the 32 less significant
  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
  // the result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83).
  __ CmpS64(scratch, Operand(83));
  __ bge(&out_of_range, Label::kNear);

  // If we reach this code, 31 <= exponent <= 83.
  // So, we don't have to handle cases where 0 <= exponent <= 20 for
  // which we would need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ mov(r0, Operand(51));
  __ SubS64(scratch, r0, scratch);
  __ CmpS64(scratch, Operand::Zero());
  __ ble(&only_low, Label::kNear);
  // 21 <= exponent <= 51, shift scratch_low and scratch_high
  // to generate the result.
  __ ShiftRightU32(scratch_low, scratch_low, scratch);
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ mov(r0, Operand(32));
  __ SubS64(scratch, r0, scratch);
  __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
  // Set the implicit 1 before the mantissa part in scratch_high.
  STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
  __ mov(r0, Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
  __ ShiftLeftU64(r0, r0, Operand(16));
  __ OrP(result_reg, result_reg, r0);
  __ ShiftLeftU32(r0, result_reg, scratch);
  __ OrP(result_reg, scratch_low, r0);
  __ b(&negate, Label::kNear);

  __ bind(&out_of_range);
  __ mov(result_reg, Operand::Zero());
  __ b(&done, Label::kNear);

  __ bind(&only_low);
  // 52 <= exponent <= 83, shift only scratch_low.
  // On entry, scratch contains: 52 - exponent.
  __ lcgr(scratch, scratch);
  __ ShiftLeftU32(result_reg, scratch_low, scratch);

  __ bind(&negate);
  // If input was positive, scratch_high ASR 31 equals 0 and
  // scratch_high LSR 31 equals zero.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // Input_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1.
  // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
  __ ShiftRightS32(r0, scratch_high, Operand(31));
#if V8_TARGET_ARCH_S390X
  __ lgfr(r0, r0);
  __ ShiftRightU64(r0, r0, Operand(32));
#endif
  __ XorP(result_reg, r0);
  __ ShiftRightU32(r0, scratch_high, Operand(31));
  __ AddS64(result_reg, r0);

  __ bind(&done);
  __ Pop(scratch_high, scratch_low);
  argument_offset -= 2 * kSystemPointerSize;

  __ bind(&fastpath_done);
  __ StoreU64(result_reg, MemOperand(sp, argument_offset));
  __ Pop(result_reg, scratch);

  __ Ret();
}

namespace {

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}

// Calls an API function.  Allocates HandleScope, extracts returned value
// from handle and propagates exceptions.  Restores context.  stack_space
// - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
static void CallApiFunctionAndReturn(MacroAssembler* masm,
                                     Register function_address,
                                     ExternalReference thunk_ref,
                                     int stack_space,
                                     MemOperand* stack_space_operand,
                                     MemOperand return_value_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);
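  // The limit and level fields are addressed as fixed offsets from
  // next_address, which is kept in r9 below.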

  // Additional parameter is the address of the actual callback.
  DCHECK(function_address == r3 || function_address == r4);
  Register scratch = r5;
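  // If the profiler or runtime call stats are active, route the call through
  // the thunk so it is visible to them; otherwise call the API function
  // directly.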

  __ Move(scratch, ExternalReference::is_profiling_address(isolate));
  __ LoadU8(scratch, MemOperand(scratch, 0));
  __ CmpS64(scratch, Operand::Zero());

  Label profiler_enabled, end_profiler_check;
  __ bne(&profiler_enabled, Label::kNear);
  __ Move(scratch, ExternalReference::address_of_runtime_stats_flag());
  __ LoadU32(scratch, MemOperand(scratch, 0));
  __ CmpS64(scratch, Operand::Zero());
  __ bne(&profiler_enabled, Label::kNear);
  {
    // Call the api function directly.
    __ mov(scratch, function_address);
    __ b(&end_profiler_check, Label::kNear);
  }
  __ bind(&profiler_enabled);
  {
    // Additional parameter is the address of the actual callback.
    __ Move(scratch, thunk_ref);
  }
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  // r9 - next_address
  // r6 - next_address->kNextOffset
  // r7 - next_address->kLimitOffset
  // r8 - next_address->kLevelOffset
  __ Move(r9, next_address);
  __ LoadU64(r6, MemOperand(r9, kNextOffset));
  __ LoadU64(r7, MemOperand(r9, kLimitOffset));
  __ LoadU32(r8, MemOperand(r9, kLevelOffset));
  __ AddS64(r8, Operand(1));
  __ StoreU32(r8, MemOperand(r9, kLevelOffset));

  __ StoreReturnAddressAndCall(scratch);

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // load value from ReturnValue
  __ LoadU64(r2, return_value_operand);
  __ bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ StoreU64(r6, MemOperand(r9, kNextOffset));
  if (FLAG_debug_code) {
    __ LoadU32(r3, MemOperand(r9, kLevelOffset));
    __ CmpS64(r3, r8);
    __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ SubS64(r8, Operand(1));
  __ StoreU32(r8, MemOperand(r9, kLevelOffset));
  __ CmpS64(r7, MemOperand(r9, kLimitOffset));
  __ bne(&delete_allocated_handles, Label::kNear);

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);
  // LeaveExitFrame expects unwind space to be in a register.
  if (stack_space_operand == nullptr) {
    DCHECK_NE(stack_space, 0);
    __ mov(r6, Operand(stack_space));
  } else {
    DCHECK_EQ(stack_space, 0);
    __ LoadU64(r6, *stack_space_operand);
  }
  __ LeaveExitFrame(false, r6, stack_space_operand != nullptr);

  // Check if the function scheduled an exception.
  __ Move(r7, ExternalReference::scheduled_exception_address(isolate));
  __ LoadU64(r7, MemOperand(r7));
  __ CompareRoot(r7, RootIndex::kTheHoleValue);
  __ bne(&promote_scheduled_exception, Label::kNear);

  __ b(r14);

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ StoreU64(r7, MemOperand(r9, kLimitOffset));
  __ mov(r6, r2);
  __ PrepareCallCFunction(1, r7);
  __ Move(r2, ExternalReference::isolate_address(isolate));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
  __ mov(r2, r6);
  __ b(&leave_exit_frame, Label::kNear);
}

}  // namespace

void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- cp                  : context
  //  -- r3                  : api function address
  //  -- r4                  : arguments count (not including the receiver)
  //  -- r5                  : call data
  //  -- r2                  : holder
  //  -- sp[0]               : receiver
  //  -- sp[8]               : first argument
  //  -- ...
  //  -- sp[(argc) * 8]      : last argument
  // -----------------------------------

  Register api_function_address = r3;
  Register argc = r4;
  Register call_data = r5;
  Register holder = r2;
  Register scratch = r6;
  DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));

  using FCA = FunctionCallbackArguments;

  STATIC_ASSERT(FCA::kArgsLength == 6);
  STATIC_ASSERT(FCA::kNewTargetIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);

  // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
  //
  // Target state:
  //   sp[0 * kSystemPointerSize]: kHolder
  //   sp[1 * kSystemPointerSize]: kIsolate
  //   sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
  //   sp[3 * kSystemPointerSize]: undefined (kReturnValue)
  //   sp[4 * kSystemPointerSize]: kData
  //   sp[5 * kSystemPointerSize]: undefined (kNewTarget)

  // Reserve space on the stack.
  __ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kSystemPointerSize)));

  // kHolder.
  __ StoreU64(holder, MemOperand(sp, 0 * kSystemPointerSize));

  // kIsolate.
  __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
  __ StoreU64(scratch, MemOperand(sp, 1 * kSystemPointerSize));

  // kReturnValueDefaultValue and kReturnValue.
  __ LoadRoot(scratch, RootIndex::kUndefinedValue);
  __ StoreU64(scratch, MemOperand(sp, 2 * kSystemPointerSize));
  __ StoreU64(scratch, MemOperand(sp, 3 * kSystemPointerSize));

  // kData.
  __ StoreU64(call_data, MemOperand(sp, 4 * kSystemPointerSize));

  // kNewTarget.
  __ StoreU64(scratch, MemOperand(sp, 5 * kSystemPointerSize));

  // Keep a pointer to kHolder (= implicit_args) in a scratch register.
  // We use it below to set up the FunctionCallbackInfo object.
  __ mov(scratch, sp);

  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  // S390 LINUX ABI:
  //
  // Create 4 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1-3] FunctionCallbackInfo
  //    [4] number of bytes to drop from the stack after returning
  static constexpr int kApiStackSpace = 5;
  static constexpr bool kDontSaveDoubles = false;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);

  // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
  // Arguments are after the return address (pushed by EnterExitFrame()).
  __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
                                          kSystemPointerSize));

  // FunctionCallbackInfo::values_ (points at the first varargs argument passed
  // on the stack).
  __ AddS64(scratch, scratch,
            Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
  __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
                                          kSystemPointerSize));

  // FunctionCallbackInfo::length_.
  __ StoreU32(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) *
                                       kSystemPointerSize));

  // We also store the number of bytes to drop from the stack after returning
  // from the API function here.
  __ mov(scratch,
         Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
  __ ShiftLeftU64(r1, argc, Operand(kSystemPointerSizeLog2));
  __ AddS64(scratch, r1);
  __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
                                          kSystemPointerSize));

  // v8::InvocationCallback's argument.
  __ lay(r2,
         MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize));

  ExternalReference thunk_ref = ExternalReference::invoke_function_callback();

  // There are two stack slots above the arguments we constructed on the stack.
  // TODO(jgruber): Document what these arguments are.
  static constexpr int kStackSlotsAboveFCA = 2;
  MemOperand return_value_operand(
      fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);

  static constexpr int kUseStackSpaceOperand = 0;
  MemOperand stack_space_operand(
      sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize);

  AllowExternalCallThatCantCauseGC scope(masm);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kUseStackSpaceOperand, &stack_space_operand,
                           return_value_operand);
}

void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
  int arg0Slot = 0;
  int accessorInfoSlot = 0;
  int apiStackSpace = 0;
  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
  // name below the exit frame to make GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
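  // The pushes below build the args_ array from kThisIndex down to
  // kShouldThrowOnErrorIndex, followed by the property name handle.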

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = r6;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  Register api_function_address = r4;

  __ push(receiver);
  // Push data from AccessorInfo.
  __ LoadAnyTaggedField(
      scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
  __ push(scratch);
  __ LoadRoot(scratch, RootIndex::kUndefinedValue);
  __ Push(scratch, scratch);
  __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
  __ Push(scratch, holder);
  __ Push(Smi::zero());  // should_throw_on_error -> false
  __ LoadTaggedPointerField(
      scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
  __ push(scratch);

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
  __ mov(r2, sp);                                    // r2 = Handle<Name>
  __ AddS64(r3, r2, Operand(1 * kSystemPointerSize));  // r3 = v8::PCI::args_

  // If ABI passes Handles (pointer-sized struct) in a register:
  //
  // Create 2 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1] AccessorInfo&
  //
  // Otherwise:
  //
  // Create 3 extra slots on stack:
  //    [0] space for DirectCEntryStub's LR save
  //    [1] copy of Handle (first arg)
  //    [2] AccessorInfo&
  if (ABI_PASSES_HANDLES_IN_REGS) {
    accessorInfoSlot = kStackFrameExtraParamSlot + 1;
    apiStackSpace = 2;
  } else {
    arg0Slot = kStackFrameExtraParamSlot + 1;
    accessorInfoSlot = arg0Slot + 1;
    apiStackSpace = 3;
  }

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, apiStackSpace);

  if (!ABI_PASSES_HANDLES_IN_REGS) {
    // pass 1st arg by reference
    __ StoreU64(r2, MemOperand(sp, arg0Slot * kSystemPointerSize));
    __ AddS64(r2, sp, Operand(arg0Slot * kSystemPointerSize));
  }

  // Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ StoreU64(r3, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
  __ AddS64(r3, sp, Operand(accessorInfoSlot * kSystemPointerSize));
  // r3 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback();

  __ LoadTaggedPointerField(
      scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ LoadU64(api_function_address,
             FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  // +3 is to skip prolog, return address and name handle.
  MemOperand return_value_operand(
      fp,
      (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
  MemOperand* const kUseStackSpaceConstant = nullptr;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, kUseStackSpaceConstant,
                           return_value_operand);
}

void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
  // Unused.
  __ stop();
}

namespace {

// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Generate_DeoptimizationEntry(MacroAssembler* masm,
                                  DeoptimizeKind deopt_kind) {
  Isolate* isolate = masm->isolate();

  // Save all the registers onto the stack
  const int kNumberOfRegisters = Register::kNumRegisters;

  RegList restored_regs = kJSCallerSaved | kCalleeSaved;

  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;

  // Save all double registers before messing with them.
  __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister dreg = DoubleRegister::from_code(code);
    int offset = code * kDoubleSize;
    __ StoreF64(dreg, MemOperand(sp, offset));
  }

  // Push all GPRs onto the stack
  __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
  __ StoreMultipleP(r0, sp, MemOperand(sp));  // Save all 16 registers

  __ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                        isolate));
  __ StoreU64(fp, MemOperand(r1));

  static constexpr int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
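  // At this point the stack holds all 16 GPRs on top of the saved double
  // registers; kSavedRegistersAreaSize spans both save areas.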

  __ mov(r4, Operand(Deoptimizer::kFixedExitSizeMarker));
  // Cleanse the Return address for 31-bit
  __ CleanseP(r14);
  // Get the address of the location in the code object (r5)(return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register r6.
  __ mov(r5, r14);
  __ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
  __ SubS64(r6, fp, r6);

  // Allocate a new deoptimizer object.
  // Pass six arguments in r2 to r7.
  __ PrepareCallCFunction(6, r7);
  __ mov(r2, Operand::Zero());
  Label context_check;
  __ LoadU64(r3,
             MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(r3, &context_check);
  __ LoadU64(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ mov(r3, Operand(static_cast<int>(deopt_kind)));
  // r4: bailout id already loaded.
  // r5: code address or 0 already loaded.
  // r6: Fp-to-sp delta.
  // Parm6: isolate is passed on the stack.
  __ Move(r7, ExternalReference::isolate_address(isolate));
  __ StoreU64(r7,
              MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));

  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
  }

  // Preserve "deoptimizer" object in register r2 and get the input
  // frame descriptor pointer to r3 (deoptimizer->input_);
  __ LoadU64(r3, MemOperand(r2, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  // DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
  // __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
  //        MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  // TODO(john.yan): optimize the following code by using mvc instruction
  DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset =
        (i * kSystemPointerSize) + FrameDescription::registers_offset();
    __ LoadU64(r4, MemOperand(sp, i * kSystemPointerSize));
    __ StoreU64(r4, MemOperand(r3, offset));
  }

  int double_regs_offset = FrameDescription::double_registers_offset();
  // Copy double registers to
  // double_registers_[DoubleRegister::kNumRegisters]
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    int src_offset =
        code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
    // TODO(joransiu): MVC opportunity
    __ LoadF64(d0, MemOperand(sp, src_offset));
    __ StoreF64(d0, MemOperand(r3, dst_offset));
  }

  // Mark the stack as not iterable for the CPU profiler which won't be able to
  // walk the stack without the return address.
  {
    UseScratchRegisterScope temps(masm);
    Register is_iterable = temps.Acquire();
    Register zero = r6;
    __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
    __ lhi(zero, Operand(0));
    __ StoreU8(zero, MemOperand(is_iterable));
  }

  // Remove the saved registers from the stack.
  __ la(sp, MemOperand(sp, kSavedRegistersAreaSize));

  // Compute a pointer to the unwinding limit in register r4; that is
  // the first stack slot not part of the input frame.
  __ LoadU64(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
  __ AddS64(r4, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ b(&pop_loop_header, Label::kNear);
  __ bind(&pop_loop);
  __ pop(r6);
  __ StoreU64(r6, MemOperand(r5, 0));
  __ la(r5, MemOperand(r5, kSystemPointerSize));
  __ bind(&pop_loop_header);
  __ CmpS64(r4, sp);
  __ bne(&pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(r2);  // Preserve deoptimizer object across call.
  // r2: deoptimizer object; r3: scratch.
  __ PrepareCallCFunction(1, r3);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
  }
  __ pop(r2);  // Restore deoptimizer object (class Deoptimizer).

  __ LoadU64(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
  // Outer loop state: r6 = current "FrameDescription** output_",
  // r3 = one past the last FrameDescription**.
  __ LoadU32(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
  __ LoadU64(r6,
             MemOperand(r2, Deoptimizer::output_offset()));  // r6 is output_.
  __ ShiftLeftU64(r3, r3, Operand(kSystemPointerSizeLog2));
  __ AddS64(r3, r6, r3);
  __ b(&outer_loop_header, Label::kNear);

  __ bind(&outer_push_loop);
  // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
  __ LoadU64(r4, MemOperand(r6, 0));  // output_[ix]
  __ LoadU64(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
  __ b(&inner_loop_header, Label::kNear);

  __ bind(&inner_push_loop);
  __ SubS64(r5, Operand(sizeof(intptr_t)));
  __ AddS64(r8, r4, r5);
  __ LoadU64(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
  __ push(r8);

  __ bind(&inner_loop_header);
  __ CmpS64(r5, Operand::Zero());
  __ bne(&inner_push_loop);  // test for gt?

  __ AddS64(r6, r6, Operand(kSystemPointerSize));
  __ bind(&outer_loop_header);
  __ CmpS64(r6, r3);
  __ blt(&outer_push_loop);

  __ LoadU64(r3, MemOperand(r2, Deoptimizer::input_offset()));
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister dreg = DoubleRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ ld(dreg, MemOperand(r3, src_offset));
  }

  // Push pc and continuation from the last output frame.
  __ LoadU64(r8, MemOperand(r4, FrameDescription::pc_offset()));
  __ push(r8);
  __ LoadU64(r8, MemOperand(r4, FrameDescription::continuation_offset()));
  __ push(r8);

  // Restore the registers from the last output frame.
  __ mov(r1, r4);
  for (int i = kNumberOfRegisters - 1; i > 0; i--) {
    int offset =
        (i * kSystemPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs & (1 << i)) != 0) {
      __ LoadU64(ToRegister(i), MemOperand(r1, offset));
    }
  }

  {
    UseScratchRegisterScope temps(masm);
    Register is_iterable = temps.Acquire();
    Register one = r6;
    __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
    __ lhi(one, Operand(1));
    __ StoreU8(one, MemOperand(is_iterable));
  }

  __ pop(ip);  // get continuation, leave pc on stack
  __ pop(r14);
  __ Jump(ip);

  __ stop();
}

}  // namespace

void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}

void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}

void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}

void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}

void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
  // Implement on this platform, https://crrev.com/c/2695591.
  __ bkpt(0);
}

void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
  // Implement on this platform, https://crrev.com/c/2695591.
  __ bkpt(0);
}

void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
    MacroAssembler* masm) {
  // Implement on this platform, https://crrev.com/c/2800112.
  __ bkpt(0);
}

void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterFrame(StackFrame::INTERNAL);

  // Only save the registers that the DynamicCheckMaps builtin can clobber.
  DynamicCheckMapsDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();
  // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
  // need to save all CallerSaved registers too.
  if (FLAG_debug_code) registers |= kJSCallerSaved;
  __ MaybeSaveRegisters(registers);

  // Load the immediate arguments from the deopt exit to pass to the builtin.
  Register slot_arg =
      descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kSlot);
  Register handler_arg =
      descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kHandler);
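  // The caller PC points into the deopt exit; the slot and handler immediates
  // are read from fixed offsets relative to it.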
  __ LoadU64(handler_arg,
             MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
  __ LoadU64(
      slot_arg,
      MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
  __ LoadU64(
      handler_arg,
      MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));

  __ Call(BUILTIN_CODE(masm->isolate(), DynamicCheckMaps),
          RelocInfo::CODE_TARGET);

  Label deopt, bailout;
  __ CmpS64(r2, Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
  __ bne(&deopt);

  __ MaybeRestoreRegisters(registers);
  __ LeaveFrame(StackFrame::INTERNAL);
  __ Ret();

  __ bind(&deopt);
  __ CmpS64(r2, Operand(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
  __ beq(&bailout);

  if (FLAG_debug_code) {
    __ CmpS64(r2, Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
    __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus);
  }
  __ MaybeRestoreRegisters(registers);
  __ LeaveFrame(StackFrame::INTERNAL);
  Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
      Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
  __ Jump(deopt_eager, RelocInfo::CODE_TARGET);

  __ bind(&bailout);
  __ MaybeRestoreRegisters(registers);
  __ LeaveFrame(StackFrame::INTERNAL);
  Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
      Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
  __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390