// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/x64/codegen-x64.h"

#if V8_TARGET_ARCH_X64

#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
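// These hooks bracket runtime calls made from code stubs: BeforeCall sets up
// an internal frame and records that on the MacroAssembler, and AfterCall
// tears it down again, so that has_frame() stays accurate across the call.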

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  // xmm0: raw double input.
  XMMRegister input = xmm0;
  XMMRegister result = xmm1;
  __ pushq(rax);
  __ pushq(rbx);

  MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);

  __ popq(rbx);
  __ popq(rax);
  __ Movsd(xmm0, result);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
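// Usage sketch (illustrative, not part of the original source): the returned
// pointer is called like a plain C function. Assuming the typedef from
// codegen.h, double (*UnaryMathFunctionWithIsolate)(double, Isolate*), a
// caller would do roughly:
//
//   UnaryMathFunctionWithIsolate fast_exp = CreateExpFunction(isolate);
//   double y = (fast_exp != nullptr) ? fast_exp(1.0, isolate)
//                                    : std::exp(1.0);  // fall back to libm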


UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  // xmm0: raw double input.
  // Compute the square root in place: xmm0 = sqrt(xmm0).
  __ Sqrtsd(xmm0, xmm0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
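// Both generators above follow the same lifecycle: allocate an executable
// page with base::OS::Allocate, emit machine code into it, flush the
// instruction cache, then drop write permission via base::OS::ProtectCode
// before handing the buffer out as a callable function pointer.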

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // Return address is on the stack.
  Register scratch = rdi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
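// A transition such as FAST_SMI_ELEMENTS -> FAST_ELEMENTS only needs the map
// change performed above: smis are already valid tagged values, so the
// backing store is reused unchanged. The RecordWriteField call is the write
// barrier informing the GC of the new map pointer stored into the object.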


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  // The fail label is taken if the allocation-site check or the allocation
  // of a new backing store below fails.
  Label allocated, new_backing_store, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  if (kPointerSize == kDoubleSize) {
    // Check backing store for COW-ness. For COW arrays we have to
    // allocate a new backing store.
    __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                   Heap::kFixedCOWArrayMapRootIndex);
    __ j(equal, &new_backing_store);
  } else {
    // For the x32 port we have to allocate a new backing store, as the SMI
    // size is not equal to the double size.
    DCHECK(kDoubleSize == 2 * kPointerSize);
    __ jmp(&new_backing_store);
  }

  // Check if the backing store is in new-space. If not, we need to allocate
  // a new one since the old one is in pointer-space.
  // If in new space, we can reuse the old backing store because it is
  // the same size.
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

  __ movp(r14, r8);  // Destination array equals source array.

  // r8 : source FixedArray
  // r9 : elements array length
  // r14: destination FixedDoubleArray
  // Set backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);

  __ bind(&allocated);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Convert smis to doubles and holes to hole NaNs. The array's length
  // remains unchanged.
  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);

  Label loop, entry, convert_hole;
  __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  // r15: the-hole NaN
  __ jmp(&entry);

  // Allocate new backing store.
  __ bind(&new_backing_store);
  __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
  __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
  // Set backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
  // Set receiver's backing store.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
  __ movp(r11, r14);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Set backing store's length.
  __ Integer32ToSmi(r11, r9);
  __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
  __ jmp(&allocated);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&done);

  // Conversion loop.
  __ bind(&loop);
  __ movp(rbx,
          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
  // r9 : current element's index
  // rbx: current element (smi-tagged)
  __ JumpIfNotSmi(rbx, &convert_hole);
  __ SmiToInteger32(rbx, rbx);
  __ Cvtlsi2sd(xmm0, rbx);
  __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), xmm0);
  __ jmp(&entry);
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  __ bind(&done);
}
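// Worked example of the conversion loop above (values assumed from V8's smi
// tagging scheme on x64): the smi 5 is stored in a FixedArray as the tagged
// word 5 << 32; the loop untags it to the int32 5, converts it with
// Cvtlsi2sd to the double 5.0, and stores those raw 8 bytes in the
// FixedDoubleArray. A hole is stored as the raw bit pattern kHoleNanInt64,
// a quiet NaN that the runtime can distinguish from any real double value.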


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ Push(rax);

  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  // r8 : source FixedDoubleArray
  // r9 : number of elements
  __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
  // r11: destination FixedArray
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
  __ Integer32ToSmi(r14, r9);
  __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);

  // Prepare for conversion loop.
  __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // rsi: the-hole NaN
  // rdi: pointer to the-hole

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ jmp(&initialization_loop_entry, Label::kNear);
  __ bind(&initialization_loop);
  __ movp(FieldOperand(r11, r9, times_pointer_size, FixedArray::kHeaderSize),
          rdi);
  __ bind(&initialization_loop_entry);
  __ decp(r9);
  __ j(not_sign, &initialization_loop);

  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(rax);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ jmp(fail);

  // Box doubles into heap numbers.
  __ bind(&loop);
  __ movq(r14, FieldOperand(r8,
                            r9,
                            times_8,
                            FixedDoubleArray::kHeaderSize));
  // r9 : current element's index
  // r14: current element
  __ cmpq(r14, rsi);
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // rax: new heap number
  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rax);
  __ movp(r15, r9);
  __ RecordWriteArray(r11,
                      rax,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rdi);

  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  // Replace receiver's backing store with newly created and filled FixedArray.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(rax);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
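// The reverse direction boxes each double: every non-hole value gets a fresh
// HeapNumber allocated for it, which is why this path needs the gc_required
// bailout and the pessimistic hole-fill above, while the smi-to-double path
// only needed a new backing store at most.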


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ testb(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ testb(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ addp(index, result);
  __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, call_runtime);
  __ movp(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ testb(result, Immediate(kShortExternalStringTag));
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzxwl(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzxbl(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}
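// Illustrative trace (assumed string layouts): for a sequential one-byte
// string "abc" with index 1, the code falls through the indirect and external
// checks, lands in seq_string, and loads the byte 'b' (0x62) from offset
// SeqOneByteString::kHeaderSize + 1 within the string object into result.
// For a sliced string, the slice offset is first added to index and the load
// then happens on the parent string instead.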

void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch));
  DCHECK(!result.is(double_scratch));
  DCHECK(!temp1.is(temp2));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label done;

  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ Movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
  __ Xorpd(result, result);
  __ Ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ Ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
  __ Movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
  __ j(above_equal, &done);
  __ Movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
  __ Movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
  __ Mulsd(double_scratch, input);
  __ Addsd(double_scratch, result);
  __ Movq(temp2, double_scratch);
  __ Subsd(double_scratch, result);
  __ Movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
  __ leaq(temp1, Operand(temp2, 0x1ff800));
  __ andq(temp2, Immediate(0x7ff));
  __ shrq(temp1, Immediate(11));
  __ Mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
  __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
  __ shlq(temp1, Immediate(52));
  __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ Subsd(double_scratch, input);
  __ Movsd(input, double_scratch);
  __ Subsd(result, double_scratch);
  __ Mulsd(input, double_scratch);
  __ Mulsd(result, input);
  __ Movq(input, temp1);
  __ Mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
  __ Subsd(result, double_scratch);
  __ Addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
  __ Mulsd(result, input);

  __ bind(&done);
}
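// Sketch of the algorithm above (inferred from the emitted code; the exact
// constant layout lives in ExternalReference::math_exp_constants and is set
// up by InitializeMathExpData): the input is first range-checked against
// underflow and overflow thresholds (constants 0-2). Then exp(x) is computed
// as 2^(x/ln 2) by splitting x/ln 2 into an integer part, an index into the
// 2048-entry math_exp_log_table (the andq with 0x7ff selects 11 bits), and a
// small remainder approximated with a short polynomial (constants 4-8). The
// integer part is shifted into the IEEE-754 exponent field (shlq by 52) and
// OR-ed with the table entry to assemble the final double.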

#undef __


CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found both in
  // FUNCTION and OPTIMIZED_FUNCTION code:
  CodePatcher patcher(isolate, young_sequence_.start(),
                      young_sequence_.length());
  patcher.masm()->pushq(rbp);
  patcher.masm()->movp(rbp, rsp);
  patcher.masm()->Push(rsi);
  patcher.masm()->Push(rdi);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte.
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence, young_length);
    patcher.masm()->call(stub->instruction_start());
    patcher.masm()->Nop(
        kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
  }
}
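// After patching, an aged sequence starts with kCallOpcode (the 0xE8 call
// opcode): a short relative call into the age stub, padded with Nop bytes up
// to kNoCodeAgeSequenceLength so it exactly overlays the young prologue
// emitted by CodeAgingHelper above. This first byte is what IsOld() checks.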


Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  DCHECK(index >= 0);
  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
  int displacement_to_last_argument = base_reg_.is(rsp) ?
      kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_.is(no_reg)) {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // (argument_count_immediate_ + receiver - 1) * kPointerSize.
    DCHECK(argument_count_immediate_ + receiver > 0);
    return Operand(base_reg_, displacement_to_last_argument +
        (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
  } else {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
    return Operand(base_reg_, argument_count_reg_, times_pointer_size,
        displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
  }
}
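// Worked example (assuming the x64 sizes kPointerSize == kPCOnStackSize == 8):
// with base_reg_ == rsp, an immediate argument count of 2, a receiver on the
// stack, and no extra displacement, GetArgumentOperand(0) yields
// rsp + 8 + (2 + 1 - 1 - 0) * 8 == Operand(rsp, 24), and
// GetArgumentOperand(1) yields Operand(rsp, 16).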


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64