// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
#define V8_S390_MACRO_ASSEMBLER_S390_H_

#include "src/bailout-reason.h"
#include "src/contexts.h"
#include "src/globals.h"
#include "src/s390/assembler-s390.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = r2;
constexpr Register kReturnRegister1 = r3;
constexpr Register kReturnRegister2 = r4;
constexpr Register kJSFunctionRegister = r3;
constexpr Register kContextRegister = r13;
constexpr Register kAllocateSizeRegister = r3;
constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r2;
constexpr Register kInterpreterBytecodeOffsetRegister = r6;
constexpr Register kInterpreterBytecodeArrayRegister = r7;
constexpr Register kInterpreterDispatchTableRegister = r8;

constexpr Register kJavaScriptCallArgCountRegister = r2;
constexpr Register kJavaScriptCallCodeStartRegister = r4;
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = r5;
constexpr Register kJavaScriptCallExtraArg1Register = r4;

constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r3;
constexpr Register kRuntimeCallArgCountRegister = r2;
constexpr Register kRuntimeCallArgvRegister = r4;
constexpr Register kWasmInstanceRegister = r6;
constexpr Register kWasmCompileLazyFuncIndexRegister = r7;

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
  return MemOperand(object, index, offset - kHeapObjectTag);
}
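
// A minimal usage sketch (illustrative only; "masm", "scratch" and "obj" are
// hypothetical names, and HeapObject::kMapOffset stands in for any field
// offset constant):
//
//   masm->LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
//
// The kHeapObjectTag subtraction compensates for the tag bit carried by heap
// object pointers, so the resulting MemOperand addresses the raw field.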

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

// These exist to provide portability between 32 and 64-bit
#if V8_TARGET_ARCH_S390X

// The length of the arithmetic operation is the length
// of the register.

// Length:
// H = halfword
// W = word

// Arithmetic and bitwise
#define AddMI agsi
#define AddRR agr
#define SubRR sgr
#define AndRR ngr
#define OrRR ogr
#define XorRR xgr
#define LoadComplementRR lcgr
#define LoadNegativeRR lngr

// Distinct Operands
#define AddP_RRR agrk
#define AddPImm_RRI aghik
#define AddLogicalP_RRR algrk
#define SubP_RRR sgrk
#define SubLogicalP_RRR slgrk
#define AndP_RRR ngrk
#define OrP_RRR ogrk
#define XorP_RRR xgrk

// Load / Store
#define LoadRR lgr
#define LoadAndTestRR ltgr
#define LoadImmP lghi

// Compare
#define CmpPH cghi
#define CmpLogicalPW clgfi

// Shifts
#define ShiftLeftP sllg
#define ShiftRightP srlg
#define ShiftLeftArithP slag
#define ShiftRightArithP srag
#else

// Arithmetic and bitwise
// Reg2Reg
#define AddMI asi
#define AddRR ar
#define SubRR sr
#define AndRR nr
#define OrRR or_z
#define XorRR xr
#define LoadComplementRR lcr
#define LoadNegativeRR lnr

// Distinct Operands
#define AddP_RRR ark
#define AddPImm_RRI ahik
#define AddLogicalP_RRR alrk
#define SubP_RRR srk
#define SubLogicalP_RRR slrk
#define AndP_RRR nrk
#define OrP_RRR ork
#define XorP_RRR xrk

// Load / Store
#define LoadRR lr
#define LoadAndTestRR ltr
#define LoadImmP lhi

// Compare
#define CmpPH chi
#define CmpLogicalPW clfi

// Shifts
#define ShiftLeftP ShiftLeft
#define ShiftRightP ShiftRight
#define ShiftLeftArithP ShiftLeftArith
#define ShiftRightArithP ShiftRightArith

#endif
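
// For example (an illustrative sketch of how these aliases expand): with the
// macros above, AddRR(r3, r4) assembles as "agr r3, r4" on 64-bit s390x
// builds and as "ar r3, r4" on 31-bit builds, so shared macro-assembler code
// can be written once for both pointer widths.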

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
      : TurboAssemblerBase(options, buffer, buffer_size) {}

  TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int buffer_size,
                 CodeObjectRequired create_code_object)
      : TurboAssemblerBase(isolate, options, buffer, buffer_size,
                           create_code_object) {}

  void LoadFromConstantsTable(Register destination,
                              int constant_index) override;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
  void LoadRootRelative(Register destination, int32_t offset) override;

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    TestIfSmi(value);
    beq(smi_label /*, cr0*/);  // branch if SMI
  }
  void JumpIfEqual(Register x, int32_t y, Label* dest);
  void JumpIfLessThan(Register x, int32_t y, Label* dest);

  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            Condition cond = al);
  void Ret() { b(r14); }
  void Ret(Condition cond) { b(cond, r14); }

  void CallForDeoptimization(Address target, int deopt_id,
                             RelocInfo::Mode rmode) {
    USE(deopt_id);
    Call(target, rmode);
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count);
  void Drop(Register count, Register scratch = r0);

  void Ret(int drop) {
    Drop(drop);
    Ret();
  }

  void Call(Label* target);

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
  void Move(Register dst, Handle<HeapObject> value);
  void Move(Register dst, ExternalReference reference);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(DoubleRegister dst, DoubleRegister src);

  void MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
                const Operand& length);

  void CompareLogicalChar(const MemOperand& opnd1, const MemOperand& opnd2,
                          const Operand& length);

  void ExclusiveOrChar(const MemOperand& opnd1, const MemOperand& opnd2,
                       const Operand& length);

  void RotateInsertSelectBits(Register dst, Register src,
                              const Operand& startBit, const Operand& endBit,
                              const Operand& shiftAmt, bool zeroBits);

  void BranchRelativeOnIdxHighP(Register dst, Register inc, Label* L);

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);
  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode, Address wasm_target);

  void MultiPush(RegList regs, Register location = sp);
  void MultiPop(RegList regs, Register location = sp);

  void MultiPushDoubles(RegList dregs, Register location = sp);
  void MultiPopDoubles(RegList dregs, Register location = sp);

  // Calculate how much stack space (in bytes) is required to store caller
  // registers, excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // the stack pointer is adjusted by.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller saved registers from the stack, and return the number of
  // bytes the stack pointer is adjusted by.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);
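
  // A minimal pairing sketch (illustrative only; r2 is excluded here because
  // it holds the return value):
  //
  //   int bytes = PushCallerSaved(kDontSaveFPRegs, r2);
  //   ...  // code that may clobber caller-saved registers
  //   PopCallerSaved(kDontSaveFPRegs, r2);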

  // Load an object from the root table.
  void LoadRoot(Register destination, RootIndex index) override {
    LoadRoot(destination, index, al);
  }
  void LoadRoot(Register destination, RootIndex index, Condition cond);
  //--------------------------------------------------------------------------
  // S390 Macro Assemblers for Instructions
  //--------------------------------------------------------------------------

  // Arithmetic Operations

  // Add (Register - Immediate)
  void Add32(Register dst, const Operand& imm);
  void Add32_RI(Register dst, const Operand& imm);
  void AddP(Register dst, const Operand& imm);
  void Add32(Register dst, Register src, const Operand& imm);
  void Add32_RRI(Register dst, Register src, const Operand& imm);
  void AddP(Register dst, Register src, const Operand& imm);

  // Add (Register - Register)
  void Add32(Register dst, Register src);
  void AddP(Register dst, Register src);
  void AddP_ExtendSrc(Register dst, Register src);
  void Add32(Register dst, Register src1, Register src2);
  void AddP(Register dst, Register src1, Register src2);
  void AddP_ExtendSrc(Register dst, Register src1, Register src2);

  // Add (Register - Mem)
  void Add32(Register dst, const MemOperand& opnd);
  void AddP(Register dst, const MemOperand& opnd);
  void AddP_ExtendSrc(Register dst, const MemOperand& opnd);

  // Add (Mem - Immediate)
  void Add32(const MemOperand& opnd, const Operand& imm);
  void AddP(const MemOperand& opnd, const Operand& imm);
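
  // Naming convention: the "32" variants always operate on 32-bit values,
  // while the "P" (pointer) variants use the native pointer width. As an
  // illustrative sketch, Add32(r3, r4) emits "ar r3, r4" on every target,
  // whereas AddP(r3, r4) emits "agr r3, r4" on 64-bit s390x and "ar r3, r4"
  // on 31-bit builds.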

  // Add Logical (Register - Register)
  void AddLogical32(Register dst, Register src1, Register src2);

  // Add Logical With Carry (Register - Register)
  void AddLogicalWithCarry32(Register dst, Register src1, Register src2);

  // Add Logical (Register - Immediate)
  void AddLogical(Register dst, const Operand& imm);
  void AddLogicalP(Register dst, const Operand& imm);

  // Add Logical (Register - Mem)
  void AddLogical(Register dst, const MemOperand& opnd);
  void AddLogicalP(Register dst, const MemOperand& opnd);

  // Subtract (Register - Immediate)
  void Sub32(Register dst, const Operand& imm);
  void Sub32_RI(Register dst, const Operand& imm) { Sub32(dst, imm); }
  void SubP(Register dst, const Operand& imm);
  void Sub32(Register dst, Register src, const Operand& imm);
  void Sub32_RRI(Register dst, Register src, const Operand& imm) {
    Sub32(dst, src, imm);
  }
  void SubP(Register dst, Register src, const Operand& imm);

  // Subtract (Register - Register)
  void Sub32(Register dst, Register src);
  void SubP(Register dst, Register src);
  void SubP_ExtendSrc(Register dst, Register src);
  void Sub32(Register dst, Register src1, Register src2);
  void SubP(Register dst, Register src1, Register src2);
  void SubP_ExtendSrc(Register dst, Register src1, Register src2);

  // Subtract (Register - Mem)
  void Sub32(Register dst, const MemOperand& opnd);
  void SubP(Register dst, const MemOperand& opnd);
  void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
  void LoadAndSub32(Register dst, Register src, const MemOperand& opnd);
  void LoadAndSub64(Register dst, Register src, const MemOperand& opnd);

  // Subtract Logical (Register - Mem)
  void SubLogical(Register dst, const MemOperand& opnd);
  void SubLogicalP(Register dst, const MemOperand& opnd);
  void SubLogicalP_ExtendSrc(Register dst, const MemOperand& opnd);
  // Subtract Logical 32-bit
  void SubLogical32(Register dst, Register src1, Register src2);
  // Subtract Logical With Borrow 32-bit
  void SubLogicalWithBorrow32(Register dst, Register src1, Register src2);

  // Multiply
  void MulP(Register dst, const Operand& opnd);
  void MulP(Register dst, Register src);
  void MulP(Register dst, const MemOperand& opnd);
  void Mul(Register dst, Register src1, Register src2);
  void Mul32(Register dst, const MemOperand& src1);
  void Mul32(Register dst, Register src1);
  void Mul32(Register dst, const Operand& src1);
  void MulHigh32(Register dst, Register src1, const MemOperand& src2);
  void MulHigh32(Register dst, Register src1, Register src2);
  void MulHigh32(Register dst, Register src1, const Operand& src2);
  void MulHighU32(Register dst, Register src1, const MemOperand& src2);
  void MulHighU32(Register dst, Register src1, Register src2);
  void MulHighU32(Register dst, Register src1, const Operand& src2);
  void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
                                    const MemOperand& src2);
  void Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Register src2);
  void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
                                    const Operand& src2);
  void Mul64(Register dst, const MemOperand& src1);
  void Mul64(Register dst, Register src1);
  void Mul64(Register dst, const Operand& src1);
  void MulPWithCondition(Register dst, Register src1, Register src2);

  // Divide
  void DivP(Register dividend, Register divider);
  void Div32(Register dst, Register src1, const MemOperand& src2);
  void Div32(Register dst, Register src1, Register src2);
  void DivU32(Register dst, Register src1, const MemOperand& src2);
  void DivU32(Register dst, Register src1, Register src2);
  void Div64(Register dst, Register src1, const MemOperand& src2);
  void Div64(Register dst, Register src1, Register src2);
  void DivU64(Register dst, Register src1, const MemOperand& src2);
  void DivU64(Register dst, Register src1, Register src2);

  // Mod
  void Mod32(Register dst, Register src1, const MemOperand& src2);
  void Mod32(Register dst, Register src1, Register src2);
  void ModU32(Register dst, Register src1, const MemOperand& src2);
  void ModU32(Register dst, Register src1, Register src2);
  void Mod64(Register dst, Register src1, const MemOperand& src2);
  void Mod64(Register dst, Register src1, Register src2);
  void ModU64(Register dst, Register src1, const MemOperand& src2);
  void ModU64(Register dst, Register src1, Register src2);

  // Square root
  void Sqrt(DoubleRegister result, DoubleRegister input);
  void Sqrt(DoubleRegister result, const MemOperand& input);

  // Compare
  void Cmp32(Register src1, Register src2);
  void CmpP(Register src1, Register src2);
  void Cmp32(Register dst, const Operand& opnd);
  void CmpP(Register dst, const Operand& opnd);
  void Cmp32(Register dst, const MemOperand& opnd);
  void CmpP(Register dst, const MemOperand& opnd);
  void CmpAndSwap(Register old_val, Register new_val, const MemOperand& opnd);
  void CmpAndSwap64(Register old_val, Register new_val, const MemOperand& opnd);

  // Compare Logical
  void CmpLogical32(Register src1, Register src2);
  void CmpLogicalP(Register src1, Register src2);
  void CmpLogical32(Register src1, const Operand& opnd);
  void CmpLogicalP(Register src1, const Operand& opnd);
  void CmpLogical32(Register dst, const MemOperand& opnd);
  void CmpLogicalP(Register dst, const MemOperand& opnd);

  // Compare Logical Byte (CLI/CLIY)
  void CmpLogicalByte(const MemOperand& mem, const Operand& imm);

  // Load 32bit
  void Load(Register dst, const MemOperand& opnd);
  void Load(Register dst, const Operand& opnd);
  void LoadW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
  void LoadW(Register dst, Register src);
  void LoadlW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
  void LoadlW(Register dst, Register src);
  void LoadLogicalHalfWordP(Register dst, const MemOperand& opnd);
  void LoadLogicalHalfWordP(Register dst, Register src);
  void LoadB(Register dst, const MemOperand& opnd);
  void LoadB(Register dst, Register src);
  void LoadlB(Register dst, const MemOperand& opnd);
  void LoadlB(Register dst, Register src);

  void LoadLogicalReversedWordP(Register dst, const MemOperand& opnd);
  void LoadLogicalReversedHalfWordP(Register dst, const MemOperand& opnd);

  // Load And Test
  void LoadAndTest32(Register dst, Register src);
  void LoadAndTestP_ExtendSrc(Register dst, Register src);
  void LoadAndTestP(Register dst, Register src);

  void LoadAndTest32(Register dst, const MemOperand& opnd);
  void LoadAndTestP(Register dst, const MemOperand& opnd);

  // Load Floating Point
  void LoadDouble(DoubleRegister dst, const MemOperand& opnd);
  void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
  void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);

  void AddFloat32(DoubleRegister dst, const MemOperand& opnd,
                  DoubleRegister scratch);
  void AddFloat64(DoubleRegister dst, const MemOperand& opnd,
                  DoubleRegister scratch);
  void SubFloat32(DoubleRegister dst, const MemOperand& opnd,
                  DoubleRegister scratch);
  void SubFloat64(DoubleRegister dst, const MemOperand& opnd,
                  DoubleRegister scratch);
  void MulFloat32(DoubleRegister dst, const MemOperand& opnd,
                  DoubleRegister scratch);
  void MulFloat64(DoubleRegister dst, const MemOperand& opnd,
                  DoubleRegister scratch);
  void DivFloat32(DoubleRegister dst, const MemOperand& opnd,
                  DoubleRegister scratch);
  void DivFloat64(DoubleRegister dst, const MemOperand& opnd,
                  DoubleRegister scratch);
  void LoadFloat32ToDouble(DoubleRegister dst, const MemOperand& opnd,
                           DoubleRegister scratch);

  // Load On Condition
  void LoadOnConditionP(Condition cond, Register dst, Register src);

  void LoadPositiveP(Register result, Register input);
  void LoadPositive32(Register result, Register input);

  // Store Floating Point
  void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
  void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
  void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
                            DoubleRegister scratch);

  void Branch(Condition c, const Operand& opnd);
  void BranchOnCount(Register r1, Label* l);

  // Shifts
  void ShiftLeft(Register dst, Register src, Register val);
  void ShiftLeft(Register dst, Register src, const Operand& val);
  void ShiftRight(Register dst, Register src, Register val);
  void ShiftRight(Register dst, Register src, const Operand& val);
  void ShiftLeftArith(Register dst, Register src, Register shift);
  void ShiftLeftArith(Register dst, Register src, const Operand& val);
  void ShiftRightArith(Register dst, Register src, Register shift);
  void ShiftRightArith(Register dst, Register src, const Operand& val);

  void ClearRightImm(Register dst, Register src, const Operand& val);

  // Bitwise operations
  void And(Register dst, Register src);
  void AndP(Register dst, Register src);
  void And(Register dst, Register src1, Register src2);
  void AndP(Register dst, Register src1, Register src2);
  void And(Register dst, const MemOperand& opnd);
  void AndP(Register dst, const MemOperand& opnd);
  void And(Register dst, const Operand& opnd);
  void AndP(Register dst, const Operand& opnd);
  void And(Register dst, Register src, const Operand& opnd);
  void AndP(Register dst, Register src, const Operand& opnd);
  void Or(Register dst, Register src);
  void OrP(Register dst, Register src);
  void Or(Register dst, Register src1, Register src2);
  void OrP(Register dst, Register src1, Register src2);
  void Or(Register dst, const MemOperand& opnd);
  void OrP(Register dst, const MemOperand& opnd);
  void Or(Register dst, const Operand& opnd);
  void OrP(Register dst, const Operand& opnd);
  void Or(Register dst, Register src, const Operand& opnd);
  void OrP(Register dst, Register src, const Operand& opnd);
  void Xor(Register dst, Register src);
  void XorP(Register dst, Register src);
  void Xor(Register dst, Register src1, Register src2);
  void XorP(Register dst, Register src1, Register src2);
  void Xor(Register dst, const MemOperand& opnd);
  void XorP(Register dst, const MemOperand& opnd);
  void Xor(Register dst, const Operand& opnd);
  void XorP(Register dst, const Operand& opnd);
  void Xor(Register dst, Register src, const Operand& opnd);
  void XorP(Register dst, Register src, const Operand& opnd);
  void Popcnt32(Register dst, Register src);
  void Not32(Register dst, Register src = no_reg);
  void Not64(Register dst, Register src = no_reg);
  void NotP(Register dst, Register src = no_reg);

#ifdef V8_TARGET_ARCH_S390X
  void Popcnt64(Register dst, Register src);
#endif

  void mov(Register dst, const Operand& src);

  void CleanUInt32(Register x) {
#ifdef V8_TARGET_ARCH_S390X
    llgfr(x, x);
#endif
  }


  void push(Register src) {
    lay(sp, MemOperand(sp, -kPointerSize));
    StoreP(src, MemOperand(sp));
  }

  void pop(Register dst) {
    LoadP(dst, MemOperand(sp));
    la(sp, MemOperand(sp, kPointerSize));
  }

  void pop() { la(sp, MemOperand(sp, kPointerSize)); }

  void Push(Register src) { push(src); }

  // Push a handle.
  void Push(Handle<HeapObject> handle);
  void Push(Smi smi);

  // Push two registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    lay(sp, MemOperand(sp, -kPointerSize * 2));
    StoreP(src1, MemOperand(sp, kPointerSize));
    StoreP(src2, MemOperand(sp, 0));
  }

  // Push three registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    lay(sp, MemOperand(sp, -kPointerSize * 3));
    StoreP(src1, MemOperand(sp, kPointerSize * 2));
    StoreP(src2, MemOperand(sp, kPointerSize));
    StoreP(src3, MemOperand(sp, 0));
  }

  // Push four registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    lay(sp, MemOperand(sp, -kPointerSize * 4));
    StoreP(src1, MemOperand(sp, kPointerSize * 3));
    StoreP(src2, MemOperand(sp, kPointerSize * 2));
    StoreP(src3, MemOperand(sp, kPointerSize));
    StoreP(src4, MemOperand(sp, 0));
  }

  // Push five registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    DCHECK(src1 != src2);
    DCHECK(src1 != src3);
    DCHECK(src2 != src3);
    DCHECK(src1 != src4);
    DCHECK(src2 != src4);
    DCHECK(src3 != src4);
    DCHECK(src1 != src5);
    DCHECK(src2 != src5);
    DCHECK(src3 != src5);
    DCHECK(src4 != src5);

    lay(sp, MemOperand(sp, -kPointerSize * 5));
    StoreP(src1, MemOperand(sp, kPointerSize * 4));
    StoreP(src2, MemOperand(sp, kPointerSize * 3));
    StoreP(src3, MemOperand(sp, kPointerSize * 2));
    StoreP(src4, MemOperand(sp, kPointerSize));
    StoreP(src5, MemOperand(sp, 0));
  }

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    LoadP(src2, MemOperand(sp, 0));
    LoadP(src1, MemOperand(sp, kPointerSize));
    la(sp, MemOperand(sp, 2 * kPointerSize));
  }

  // Pop three registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    LoadP(src3, MemOperand(sp, 0));
    LoadP(src2, MemOperand(sp, kPointerSize));
    LoadP(src1, MemOperand(sp, 2 * kPointerSize));
    la(sp, MemOperand(sp, 3 * kPointerSize));
  }

  // Pop four registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4) {
    LoadP(src4, MemOperand(sp, 0));
    LoadP(src3, MemOperand(sp, kPointerSize));
    LoadP(src2, MemOperand(sp, 2 * kPointerSize));
    LoadP(src1, MemOperand(sp, 3 * kPointerSize));
    la(sp, MemOperand(sp, 4 * kPointerSize));
  }

  // Pop five registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4,
           Register src5) {
    LoadP(src5, MemOperand(sp, 0));
    LoadP(src4, MemOperand(sp, kPointerSize));
    LoadP(src3, MemOperand(sp, 2 * kPointerSize));
    LoadP(src2, MemOperand(sp, 3 * kPointerSize));
    LoadP(src1, MemOperand(sp, 4 * kPointerSize));
    la(sp, MemOperand(sp, 5 * kPointerSize));
  }
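
  // A minimal sketch of the ordering contract (illustrative only):
  //
  //   Push(r3, r4);
  //   Pop(r3, r4);
  //
  // restores both registers, because Push stores the leftmost register at the
  // highest address and Pop reloads it from that same slot.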

  // Push a fixed frame, consisting of lr, fp, constant pool.
  void PushCommonFrame(Register marker_reg = no_reg);

  // Push a standard frame, consisting of lr, fp, constant pool,
  // context and JS function
  void PushStandardFrame(Register function_reg);

  void PopCommonFrame(Register marker_reg = no_reg);

  // Restore caller's frame pointer and return address prior to being
  // overwritten by tail call stack preparation.
  void RestoreFrameStateForTailCall();

  void InitializeRootRegister() {
    ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
    mov(kRootRegister, Operand(isolate_root));
  }

  // If the value is a NaN, canonicalize the value; otherwise do nothing.
  void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
  void CanonicalizeNaN(const DoubleRegister value) {
    CanonicalizeNaN(value, value);
  }

  // Converts the integer (untagged smi) in |src| to a double, storing
  // the result to |dst|
  void ConvertIntToDouble(DoubleRegister dst, Register src);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a double, storing the result to |dst|
  void ConvertUnsignedIntToDouble(DoubleRegister dst, Register src);

  // Converts the integer (untagged smi) in |src| to
  // a float, storing the result in |dst|
  void ConvertIntToFloat(DoubleRegister dst, Register src);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a float, storing the result in |dst|
  void ConvertUnsignedIntToFloat(DoubleRegister dst, Register src);

  void ConvertInt64ToFloat(DoubleRegister double_dst, Register src);
  void ConvertInt64ToDouble(DoubleRegister double_dst, Register src);
  void ConvertUnsignedInt64ToFloat(DoubleRegister double_dst, Register src);
  void ConvertUnsignedInt64ToDouble(DoubleRegister double_dst, Register src);

  void MovIntToFloat(DoubleRegister dst, Register src);
  void MovFloatToInt(Register dst, DoubleRegister src);
  void MovDoubleToInt64(Register dst, DoubleRegister src);
  void MovInt64ToDouble(DoubleRegister dst, Register src);
  // Converts the double_input to an integer, storing the result in dst.
  void ConvertFloat32ToInt64(const Register dst,
                             const DoubleRegister double_input,
                             FPRoundingMode rounding_mode = kRoundToZero);

  // Converts the double_input to an integer, storing the result in dst.
  void ConvertDoubleToInt64(const Register dst,
                            const DoubleRegister double_input,
                            FPRoundingMode rounding_mode = kRoundToZero);
  void ConvertDoubleToInt32(const Register dst,
                            const DoubleRegister double_input,
                            FPRoundingMode rounding_mode = kRoundToZero);

  void ConvertFloat32ToInt32(const Register result,
                             const DoubleRegister double_input,
                             FPRoundingMode rounding_mode);
  void ConvertFloat32ToUnsignedInt32(
      const Register result, const DoubleRegister double_input,
      FPRoundingMode rounding_mode = kRoundToZero);
  // Converts the double_input to an unsigned integer, storing the result in
  // dst.
  void ConvertDoubleToUnsignedInt64(
      const Register dst, const DoubleRegister double_input,
      FPRoundingMode rounding_mode = kRoundToZero);
  void ConvertDoubleToUnsignedInt32(
      const Register dst, const DoubleRegister double_input,
      FPRoundingMode rounding_mode = kRoundToZero);
  void ConvertFloat32ToUnsignedInt64(
      const Register result, const DoubleRegister double_input,
      FPRoundingMode rounding_mode = kRoundToZero);

#if !V8_TARGET_ARCH_S390X
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, Register scratch, Register shift);
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, uint32_t shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, Register scratch, Register shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, uint32_t shift);
  void ShiftRightArithPair(Register dst_low, Register dst_high,
                           Register src_low, Register src_high,
                           Register scratch, Register shift);
  void ShiftRightArithPair(Register dst_low, Register dst_high,
                           Register src_low, Register src_high, uint32_t shift);
#endif

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type, Register base = no_reg,
                    int prologue_offset = 0);
  void Prologue(Register base, int prologue_offset = 0);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();
  // ----------------------------------------------------------------
  // new S390 macro-assembler interfaces that are slightly higher level
  // than assembler-s390 and may generate variable length sequences

  // load a literal signed int value <value> to GPR <dst>
  void LoadIntLiteral(Register dst, int value);

  // load an SMI value <value> to GPR <dst>
  void LoadSmiLiteral(Register dst, Smi smi);

  // load a literal double value <value> to FPR <result>
  void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
  void LoadDoubleLiteral(DoubleRegister result, uint64_t value,
                         Register scratch);

  void LoadFloat32Literal(DoubleRegister result, float value, Register scratch);

  void StoreW(Register src, const MemOperand& mem, Register scratch = no_reg);

  void LoadHalfWordP(Register dst, Register src);

  void LoadHalfWordP(Register dst, const MemOperand& mem,
                     Register scratch = no_reg);

  void StoreHalfWord(Register src, const MemOperand& mem,
                     Register scratch = r0);
  void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
  void CmpSmiLiteral(Register src1, Smi smi, Register scratch);

  // Set new rounding mode RN to FPSCR
  void SetRoundingMode(FPRoundingMode RN);

  // reset rounding mode to default (kRoundToNearest)
  void ResetRoundingMode();

  // These exist to provide portability between 32 and 64-bit
  void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
  void StoreP(const MemOperand& mem, const Operand& opnd,
              Register scratch = no_reg);
  void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
  void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
  void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
  void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);

  void SwapP(Register src, Register dst, Register scratch);
  void SwapP(Register src, MemOperand dst, Register scratch);
  void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
             Register scratch_1);
  void SwapFloat32(DoubleRegister src, DoubleRegister dst,
                   DoubleRegister scratch);
  void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
  void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
                   DoubleRegister scratch_1);
  void SwapDouble(DoubleRegister src, DoubleRegister dst,
                  DoubleRegister scratch);
  void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
  void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
                  DoubleRegister scratch_1);

  // Cleanse pointer address on 31-bit by zeroing out the top bit.
  // This is a NOP on 64-bit.
  void CleanseP(Register src) {
#if (V8_HOST_ARCH_S390 && !(V8_TARGET_ARCH_S390X))
    nilh(src, Operand(0x7FFF));
#endif
  }

  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a runtime routine. This expects {centry} to contain a fitting CEntry
  // builtin for the target runtime function and uses an indirect call.
  void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments, Register scratch);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  void MovFromFloatParameter(DoubleRegister dst);
  void MovFromFloatResult(DoubleRegister dst);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode);
  void TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input,
                                  Label* done);

  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason, CRegister cr = cr7);

  // Print a message to stdout and abort execution.
  void Abort(AbortReason reason);

  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // Bit testing/extraction
  //
  // Bit numbering is such that the least significant bit is bit 0
  // (for consistency between 32/64-bit).

  // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
  // and place them into the least significant bits of dst.
  inline void ExtractBitRange(Register dst, Register src, int rangeStart,
                              int rangeEnd) {
    DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerSystemPointer);

    // Try to use RISBG if possible.
    if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
      int shiftAmount = (64 - rangeEnd) % 64;  // Convert to shift left.
      int endBit = 63;  // End is always LSB after shifting.
      int startBit = 63 - rangeStart + rangeEnd;
      RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
                             Operand(shiftAmount), true);
    } else {
      if (rangeEnd > 0)  // Don't need to shift if rangeEnd is zero.
        ShiftRightP(dst, src, Operand(rangeEnd));
      else if (dst != src)  // If we didn't shift, we might need to copy
        LoadRR(dst, src);
      int width = rangeStart - rangeEnd + 1;
#if V8_TARGET_ARCH_S390X
      uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
      nihf(dst, Operand(mask >> 32));
      nilf(dst, Operand(mask & 0xFFFFFFFF));
      ltgr(dst, dst);
#else
      uint32_t mask = (1 << width) - 1;
      AndP(dst, Operand(mask));
#endif
    }
  }
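
  // A minimal sketch (illustrative only): ExtractBitRange(r4, r3, 7, 4)
  // places bits 7..4 of r3 into the least significant four bits of r4 and
  // clears the remaining bits of r4.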

  inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
    ExtractBitRange(dst, src, bitNumber, bitNumber);
  }

  // Extract consecutive bits (defined by mask) from src and place them
  // into the least significant bits of dst.
  inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
                             RCBit rc = LeaveRC) {
    int start = kBitsPerSystemPointer - 1;
    int end;
    uintptr_t bit = (1L << start);

    while (bit && (mask & bit) == 0) {
      start--;
      bit >>= 1;
    }
    end = start;
    bit >>= 1;

    while (bit && (mask & bit)) {
      end--;
      bit >>= 1;
    }

    // 1-bits in mask must be contiguous
    DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);

    ExtractBitRange(dst, src, start, end);
  }
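
  // For example (illustrative only), ExtractBitMask(dst, src, 0x00F0) behaves
  // like ExtractBitRange(dst, src, 7, 4), since the set bits of the mask
  // occupy bit positions 7 through 4.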

  // Test single bit in value.
  inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
    ExtractBitRange(scratch, value, bitNumber, bitNumber);
  }

  // Test consecutive bit range in value.  Range is defined by
  // rangeStart - rangeEnd.
  inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
                           Register scratch = r0) {
    ExtractBitRange(scratch, value, rangeStart, rangeEnd);
  }

  // Test consecutive bit range in value.  Range is defined by mask.
  inline void TestBitMask(Register value, uintptr_t mask,
                          Register scratch = r0) {
    ExtractBitMask(scratch, value, mask, SetRC);
  }
  inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }

  inline void TestIfSmi(MemOperand value) {
    if (is_uint12(value.offset())) {
      tm(value, Operand(1));
    } else if (is_int20(value.offset())) {
      tmy(value, Operand(1));
    } else {
      LoadB(r0, value);
      tmll(r0, Operand(1));
    }
  }

  inline void TestIfInt32(Register value) {
    // High bits must be identical to fit into a 32-bit integer
    cgfr(value, value);
  }
  void SmiUntag(Register reg, int scale = 0) { SmiUntag(reg, reg, scale); }

  void SmiUntag(Register dst, Register src, int scale = 0) {
    if (scale > kSmiShift) {
      ShiftLeftP(dst, src, Operand(scale - kSmiShift));
    } else if (scale < kSmiShift) {
      ShiftRightArithP(dst, src, Operand(kSmiShift - scale));
    } else {
      // do nothing
    }
  }
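
  // A minimal sketch (illustrative only): SmiUntag(r3, r4) untags the smi in
  // r4 into r3 via an arithmetic right shift by kSmiShift, while
  // SmiUntag(r3, r4, kPointerSizeLog2) yields the untagged value scaled to a
  // pointer-sized byte offset, the same computation that
  // MacroAssembler::SmiToPtrArrayOffset performs.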

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);

  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met);

  void ResetSpeculationPoisonRegister();
  void ComputeCodeStartAddress(Register dst);
  void LoadPC(Register dst);

  // Generates an instruction sequence s.t. the return address points to the
  // instruction following the call.
  // The return address on the stack is used by frame iteration.
  void StoreReturnAddressAndCall(Register target);

 private:
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode, Handle<Code> code_target,
                           Address wasm_target);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);
};

// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 public:
  MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
      : TurboAssembler(options, buffer, size) {}

  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object)
      : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
                       size, create_code_object) {}

  MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int size, CodeObjectRequired create_code_object);

  // Call a code stub.
  void TailCallStub(CodeStub* stub, Condition cond = al);

  void CallStub(CodeStub* stub, Condition cond = al);
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // Type_reg can be no_reg. In that case ip is used.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
  void CompareRoot(Register obj, RootIndex index);
  void PushRoot(RootIndex index) {
    LoadRoot(r0, index);
    Push(r0);
  }

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToInstructionStream(Address entry);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
    CompareRoot(with, index);
    beq(if_equal);
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
    CompareRoot(with, index);
    bne(if_not_equal);
  }

  // Try to convert a double to a signed 32-bit integer.
  // CR_EQ in cr7 is set and result assigned if the conversion is exact.
  void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
                             Register scratch, DoubleRegister double_scratch);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Set up call kind marking in ecx. The method takes ecx as an
  // explicit first parameter to make the code more readable at the
  // call sites.
  // void SetCallKind(Register dst, CallKind kind);

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag);

  // On function call, call into the debugger if necessary.
  void CheckDebugHook(Register fun, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag);

  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag);

  // Frame restart support
  void MaybeDropFrames();

  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // Enter exit frame.
  // stack_space - extra stack space, used for parameters before call to C.
  // At least one slot (for the return address) should be provided.
  void EnterExitFrame(bool save_doubles, int stack_space = 1,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame. Expects the return value in r2.
  // Expect the number of values, pushed prior to the exit frame, to
  // remove in a register (or no_reg, if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool argument_count_is_length = false);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  void LoadNativeContextSlot(int index, Register dst);

  // ---------------------------------------------------------------------------
  // Smi utilities

  // Shift left by kSmiShift
  void SmiTag(Register reg) { SmiTag(reg, reg); }
  void SmiTag(Register dst, Register src) {
    ShiftLeftP(dst, src, Operand(kSmiShift));
  }

  void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
    ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
    ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    TestIfSmi(value);
    bne(not_smi_label /*, cr0*/);
  }
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

#if V8_TARGET_ARCH_S390X
  // Ensure it is permissible to read/write int value directly from
  // upper half of the smi.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_LITTLE_ENDIAN
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object, Register scratch);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
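
  // A minimal sketch, assuming a BitField-style type such as
  // Map::ElementsKindBits (illustrative only):
  //
  //   DecodeField<Map::ElementsKindBits>(r3, r4);
  //
  // extracts Field::kSize bits starting at Field::kShift from r4 and leaves
  // them in the low bits of r3.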

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object, Register value,
                                           Register address);

  void CallJSEntry(Register target);
  static int CallSizeNotPredictableCodeSize(Address target,
                                            RelocInfo::Mode rmode,
                                            Condition cond = al);
  void JumpToJSEntry(Register target);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written.  |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object, Register address, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
                          Register scratch = no_reg);
  void StoreRepresentation(Register src, const MemOperand& mem,
                           Representation r, Register scratch = no_reg);

 private:
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};

// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_S390_MACRO_ASSEMBLER_S390_H_