// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/api.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/heap/heap-inl.h"  // crbug.com/v8/8499
#include "src/ic/accessor-assembler.h"
#include "src/ic/keyed-store-generic.h"
#include "src/macro-assembler.h"
#include "src/objects/debug-objects.h"
#include "src/objects/shared-function-info.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

template <typename T>
using TNode = compiler::TNode<T>;

// -----------------------------------------------------------------------------
// Stack checks.

void Builtins::Generate_StackCheck(MacroAssembler* masm) {
  masm->TailCallRuntime(Runtime::kStackGuard);
}

// -----------------------------------------------------------------------------
// TurboFan support builtins.

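// Clones the elements backing store of {object} and installs the clone on the
// object; a typical use is making a copy-on-write FixedArray writable before
// an in-place store.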
TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
  Node* object = Parameter(Descriptor::kObject);

  // Load the {object}'s elements.
  Node* source = LoadObjectField(object, JSObject::kElementsOffset);
  Node* target = CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays);
  StoreObjectField(object, JSObject::kElementsOffset, target);
  Return(target);
}

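// The two Grow* builtins below try to grow the elements backing store of
// {object} so that {key} becomes a valid index, and fall back to the
// GrowArrayElements runtime function when the fast path bails out.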
TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
  Node* object = Parameter(Descriptor::kObject);
  Node* key = Parameter(Descriptor::kKey);
  Node* context = Parameter(Descriptor::kContext);

  Label runtime(this, Label::kDeferred);
  Node* elements = LoadElements(object);
  elements = TryGrowElementsCapacity(object, elements, PACKED_DOUBLE_ELEMENTS,
                                     key, &runtime);
  Return(elements);

  BIND(&runtime);
  TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
}

TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
  Node* object = Parameter(Descriptor::kObject);
  Node* key = Parameter(Descriptor::kKey);
  Node* context = Parameter(Descriptor::kContext);

  Label runtime(this, Label::kDeferred);
  Node* elements = LoadElements(object);
  elements =
      TryGrowElementsCapacity(object, elements, PACKED_ELEMENTS, key, &runtime);
  Return(elements);

  BIND(&runtime);
  TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
}

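// Allocates the FixedArray that backs an arguments object, copying the actual
// parameters out of the caller's {frame}. The first {mapped_count} slots
// (clamped to {length}) are filled with the hole, since they back aliased
// (mapped) arguments.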
TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
  Node* frame = Parameter(Descriptor::kFrame);
  TNode<IntPtrT> length = SmiToIntPtr(Parameter(Descriptor::kLength));
  TNode<IntPtrT> mapped_count =
      SmiToIntPtr(Parameter(Descriptor::kMappedCount));

  // Check if we can allocate in new space.
  ElementsKind kind = PACKED_ELEMENTS;
  int max_elements = FixedArray::GetMaxLengthForNewSpaceAllocation(kind);
  Label if_newspace(this), if_oldspace(this, Label::kDeferred);
  Branch(IntPtrLessThan(length, IntPtrConstant(max_elements)), &if_newspace,
         &if_oldspace);

  BIND(&if_newspace);
  {
    // Prefer EmptyFixedArray in case of non-positive {length} (the {length}
    // can be negative here for rest parameters).
    Label if_empty(this), if_notempty(this);
    Branch(IntPtrLessThanOrEqual(length, IntPtrConstant(0)), &if_empty,
           &if_notempty);

    BIND(&if_empty);
    Return(EmptyFixedArrayConstant());

    BIND(&if_notempty);
    {
      // Allocate a FixedArray in new space.
      TNode<FixedArray> result = CAST(AllocateFixedArray(kind, length));

      // The elements might be used to back mapped arguments. In that case fill
      // the mapped elements (i.e. the first {mapped_count}) with the hole, but
      // make sure not to overshoot the {length} if some arguments are missing.
      TNode<IntPtrT> number_of_holes = IntPtrMin(mapped_count, length);
      Node* the_hole = TheHoleConstant();

      // Fill the first elements up to {number_of_holes} with the hole.
      TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
      Label loop1(this, &var_index), done_loop1(this);
      Goto(&loop1);
      BIND(&loop1);
      {
        // Load the current {index}.
        TNode<IntPtrT> index = var_index.value();

        // Check if we are done.
        GotoIf(WordEqual(index, number_of_holes), &done_loop1);

        // Store the hole into the {result}.
        StoreFixedArrayElement(result, index, the_hole, SKIP_WRITE_BARRIER);

        // Continue with next {index}.
        var_index = IntPtrAdd(index, IntPtrConstant(1));
        Goto(&loop1);
      }
      BIND(&done_loop1);

      // Compute the effective {offset} into the {frame}.
      TNode<IntPtrT> offset = IntPtrAdd(length, IntPtrConstant(1));

      // Copy the parameters from {frame} (starting at {offset}) to {result}.
      Label loop2(this, &var_index), done_loop2(this);
      Goto(&loop2);
      BIND(&loop2);
      {
        // Load the current {index}.
        TNode<IntPtrT> index = var_index.value();

        // Check if we are done.
        GotoIf(WordEqual(index, length), &done_loop2);

        // Load the parameter at the given {index}.
        TNode<Object> value = BitcastWordToTagged(
            Load(MachineType::Pointer(), frame,
                 TimesSystemPointerSize(IntPtrSub(offset, index))));

        // Store the {value} into the {result}.
        StoreFixedArrayElement(result, index, value, SKIP_WRITE_BARRIER);

        // Continue with next {index}.
        var_index = IntPtrAdd(index, IntPtrConstant(1));
        Goto(&loop2);
      }
      BIND(&done_loop2);

      Return(result);
    }
  }

  BIND(&if_oldspace);
  {
    // Allocate in old space (or large object space).
    TailCallRuntime(Runtime::kNewArgumentsElements, NoContextConstant(),
                    BitcastWordToTagged(frame), SmiFromIntPtr(length),
                    SmiFromIntPtr(mapped_count));
  }
}

TF_BUILTIN(ReturnReceiver, CodeStubAssembler) {
  Return(Parameter(Descriptor::kReceiver));
}

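// Trampoline installed in place of a function's code when a break-at-entry
// breakpoint is set: it calls Runtime::kDebugBreakAtEntry when the DebugInfo
// requests it, then tail-calls the code held by the SharedFunctionInfo.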
TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
  Label tailcall_to_shared(this);
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
  TNode<Int32T> arg_count =
      UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
  TNode<JSFunction> function = CAST(Parameter(Descriptor::kJSTarget));

  // Check break-at-entry flag on the debug info.
  TNode<SharedFunctionInfo> shared =
      CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
  TNode<Object> maybe_heap_object_or_smi =
      LoadObjectField(shared, SharedFunctionInfo::kScriptOrDebugInfoOffset);
  TNode<HeapObject> maybe_debug_info =
      TaggedToHeapObject(maybe_heap_object_or_smi, &tailcall_to_shared);
  GotoIfNot(HasInstanceType(maybe_debug_info, InstanceType::DEBUG_INFO_TYPE),
            &tailcall_to_shared);

  {
    TNode<DebugInfo> debug_info = CAST(maybe_debug_info);
    TNode<Smi> flags =
        CAST(LoadObjectField(debug_info, DebugInfo::kFlagsOffset));
    GotoIfNot(SmiToInt32(SmiAnd(flags, SmiConstant(DebugInfo::kBreakAtEntry))),
              &tailcall_to_shared);

    CallRuntime(Runtime::kDebugBreakAtEntry, context, function);
    Goto(&tailcall_to_shared);
  }

  BIND(&tailcall_to_shared);
  // Tail call into code object on the SharedFunctionInfo.
  TNode<Code> code = GetSharedFunctionInfoCode(shared);
  TailCallJSCode(code, context, function, new_target, arg_count);
}

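// Assembler helpers shared by the RecordWrite builtin below: querying the
// heap's marking state, page flags and mark bits, and calling write-barrier
// C functions with caller-saved registers preserved.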
class RecordWriteCodeStubAssembler : public CodeStubAssembler {
 public:
  explicit RecordWriteCodeStubAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  Node* IsMarking() {
    Node* is_marking_addr = ExternalConstant(
        ExternalReference::heap_is_marking_flag_address(this->isolate()));
    return Load(MachineType::Uint8(), is_marking_addr);
  }

  TNode<BoolT> IsPageFlagSet(TNode<IntPtrT> object, int mask) {
    TNode<IntPtrT> page = PageFromAddress(object);
    TNode<IntPtrT> flags =
        UncheckedCast<IntPtrT>(Load(MachineType::Pointer(), page,
                                    IntPtrConstant(MemoryChunk::kFlagsOffset)));
    return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
                        IntPtrConstant(0));
  }

  TNode<BoolT> IsWhite(TNode<IntPtrT> object) {
    DCHECK_EQ(strcmp(Marking::kWhiteBitPattern, "00"), 0);
    Node* cell;
    Node* mask;
    GetMarkBit(object, &cell, &mask);
    mask = TruncateIntPtrToInt32(mask);
    // All non-white color patterns have the first bit set, so it suffices to
    // check that bit.
    return Word32Equal(Word32And(Load(MachineType::Int32(), cell), mask),
                       Int32Constant(0));
  }

  void GetMarkBit(TNode<IntPtrT> object, Node** cell, Node** mask) {
    TNode<IntPtrT> page = PageFromAddress(object);
    Node* bitmap = Load(MachineType::Pointer(), page,
                        IntPtrConstant(MemoryChunk::kMarkBitmapOffset));
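
    // The marking bitmap holds one bit per tagged-size slot of the page: the
    // bit index of {object} within it is
    //   (object & kPageAlignmentMask) >> kTaggedSizeLog2,
    // which the two blocks below split into the byte offset of a 32-bit cell
    // in the bitmap ({cell}) and a bit mask within that cell ({mask}).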

    {
      // Temp variable to calculate cell offset in bitmap.
      Node* r0;
      int shift = Bitmap::kBitsPerCellLog2 + kTaggedSizeLog2 -
                  Bitmap::kBytesPerCellLog2;
      r0 = WordShr(object, IntPtrConstant(shift));
      r0 = WordAnd(r0, IntPtrConstant((kPageAlignmentMask >> shift) &
                                      ~(Bitmap::kBytesPerCell - 1)));
      *cell = IntPtrAdd(bitmap, r0);
    }
    {
      // Temp variable to calculate bit offset in cell.
      Node* r1;
      r1 = WordShr(object, IntPtrConstant(kTaggedSizeLog2));
      r1 = WordAnd(r1, IntPtrConstant((1 << Bitmap::kBitsPerCellLog2) - 1));
      // The shift instruction appears to use only the low byte of the shift
      // count (e.g. cl on x64), so no manual masking is needed. Uncomment the
      // following line otherwise.
      // WordAnd(r1, IntPtrConstant((1 << kBitsPerByte) - 1)));
      *mask = WordShl(IntPtrConstant(1), r1);
    }
  }

  Node* ShouldSkipFPRegs(Node* mode) {
    return WordEqual(mode, SmiConstant(kDontSaveFPRegs));
  }

  Node* ShouldEmitRememberSet(Node* remembered_set) {
    return WordEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
  }

  void CallCFunction1WithCallerSavedRegistersMode(MachineType return_type,
                                                  MachineType arg0_type,
                                                  Node* function, Node* arg0,
                                                  Node* mode, Label* next) {
    Label dont_save_fp(this), save_fp(this);
    Branch(ShouldSkipFPRegs(mode), &dont_save_fp, &save_fp);
    BIND(&dont_save_fp);
    {
      CallCFunction1WithCallerSavedRegisters(return_type, arg0_type, function,
                                             arg0, kDontSaveFPRegs);
      Goto(next);
    }

    BIND(&save_fp);
    {
      CallCFunction1WithCallerSavedRegisters(return_type, arg0_type, function,
                                             arg0, kSaveFPRegs);
      Goto(next);
    }
  }

  void CallCFunction3WithCallerSavedRegistersMode(
      MachineType return_type, MachineType arg0_type, MachineType arg1_type,
      MachineType arg2_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
      Node* mode, Label* next) {
    Label dont_save_fp(this), save_fp(this);
    Branch(ShouldSkipFPRegs(mode), &dont_save_fp, &save_fp);
    BIND(&dont_save_fp);
    {
      CallCFunction3WithCallerSavedRegisters(return_type, arg0_type, arg1_type,
                                             arg2_type, function, arg0, arg1,
                                             arg2, kDontSaveFPRegs);
      Goto(next);
    }

    BIND(&save_fp);
    {
      CallCFunction3WithCallerSavedRegisters(return_type, arg0_type, arg1_type,
                                             arg2_type, function, arg0, arg1,
                                             arg2, kSaveFPRegs);
      Goto(next);
    }
  }

  void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Node* mode,
                                  Label* next) {
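    // Append {slot} to the store buffer and bump the top pointer. Once the
    // new top, masked with the store buffer mask, wraps to zero, the buffer
    // is full and is flushed via the store-buffer overflow C function.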
    Node* store_buffer_top_addr =
        ExternalConstant(ExternalReference::store_buffer_top(this->isolate()));
    Node* store_buffer_top =
        Load(MachineType::Pointer(), store_buffer_top_addr);
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top,
                        slot);
    Node* new_store_buffer_top =
        IntPtrAdd(store_buffer_top, IntPtrConstant(kSystemPointerSize));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(),
                        store_buffer_top_addr, new_store_buffer_top);

    Node* test = WordAnd(new_store_buffer_top,
                         IntPtrConstant(Heap::store_buffer_mask_constant()));

    Label overflow(this);
    Branch(WordEqual(test, IntPtrConstant(0)), &overflow, next);

    BIND(&overflow);
    {
      Node* function =
          ExternalConstant(ExternalReference::store_buffer_overflow_function());
      CallCFunction1WithCallerSavedRegistersMode(MachineType::Int32(),
                                                 MachineType::Pointer(),
                                                 function, isolate, mode, next);
    }
  }
};

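// Write barrier stub called by generated code after a pointer store. It
// dispatches between the generational barrier (recording the slot in the
// store buffer) and the incremental marking barrier, based on the
// {remembered_set} parameter and the current marking state.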
TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
  Label generational_wb(this);
  Label incremental_wb(this);
  Label exit(this);

  Node* remembered_set = Parameter(Descriptor::kRememberedSet);
  Branch(ShouldEmitRememberSet(remembered_set), &generational_wb,
         &incremental_wb);

  BIND(&generational_wb);
  {
    Label test_old_to_young_flags(this);
    Label store_buffer_exit(this), store_buffer_incremental_wb(this);

    // When incremental marking is not on, we skip cross-generation pointer
    // checking here, because there are checks for
    // `kPointersFromHereAreInterestingMask` and
    // `kPointersToHereAreInterestingMask` in
    // `src/compiler/<arch>/code-generator-<arch>.cc` before calling this stub,
    // which serve as the cross-generation checking.
    TNode<IntPtrT> slot = UncheckedCast<IntPtrT>(Parameter(Descriptor::kSlot));
    Branch(IsMarking(), &test_old_to_young_flags, &store_buffer_exit);

    BIND(&test_old_to_young_flags);
    {
      // TODO(ishell): do a new-space range check instead.
      TNode<IntPtrT> value =
          BitcastTaggedToWord(Load(MachineType::TaggedPointer(), slot));

      // TODO(albertnetymk): Try to cache the page flag for value and object,
      // instead of calling IsPageFlagSet each time.
      TNode<BoolT> value_is_young =
          IsPageFlagSet(value, MemoryChunk::kIsInYoungGenerationMask);
      GotoIfNot(value_is_young, &incremental_wb);

      TNode<IntPtrT> object =
          BitcastTaggedToWord(Parameter(Descriptor::kObject));
      TNode<BoolT> object_is_young =
          IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask);
      Branch(object_is_young, &incremental_wb, &store_buffer_incremental_wb);
    }

    BIND(&store_buffer_exit);
    {
      Node* isolate_constant =
          ExternalConstant(ExternalReference::isolate_address(isolate()));
      Node* fp_mode = Parameter(Descriptor::kFPMode);
      InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, &exit);
    }

    BIND(&store_buffer_incremental_wb);
    {
      Node* isolate_constant =
          ExternalConstant(ExternalReference::isolate_address(isolate()));
      Node* fp_mode = Parameter(Descriptor::kFPMode);
      InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode,
                                 &incremental_wb);
    }
  }

  BIND(&incremental_wb);
  {
    Label call_incremental_wb(this);

    TNode<IntPtrT> slot = UncheckedCast<IntPtrT>(Parameter(Descriptor::kSlot));
    TNode<IntPtrT> value =
        BitcastTaggedToWord(Load(MachineType::TaggedPointer(), slot));

    // There are two cases in which we need to call the incremental write
    // barrier:
    // 1) value_is_white
    GotoIf(IsWhite(value), &call_incremental_wb);

    // 2) is_compacting && value_in_EC && obj_isnt_skip
    // is_compacting = true when is_marking = true
    GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
              &exit);

    TNode<IntPtrT> object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
    Branch(
        IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
        &exit, &call_incremental_wb);

    BIND(&call_incremental_wb);
    {
      Node* function = ExternalConstant(
          ExternalReference::incremental_marking_record_write_function());
      Node* isolate_constant =
          ExternalConstant(ExternalReference::isolate_address(isolate()));
      Node* fp_mode = Parameter(Descriptor::kFPMode);
      TNode<IntPtrT> object =
          BitcastTaggedToWord(Parameter(Descriptor::kObject));
      CallCFunction3WithCallerSavedRegistersMode(
          MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
          MachineType::Pointer(), function, object, slot, isolate_constant,
          fp_mode, &exit);
    }
  }

  BIND(&exit);
  Return(TrueConstant());
}

class DeletePropertyBaseAssembler : public AccessorAssembler {
 public:
  explicit DeletePropertyBaseAssembler(compiler::CodeAssemblerState* state)
      : AccessorAssembler(state) {}

  void DeleteDictionaryProperty(TNode<Object> receiver,
                                TNode<NameDictionary> properties,
                                TNode<Name> name, TNode<Context> context,
                                Label* dont_delete, Label* notfound) {
    TVARIABLE(IntPtrT, var_name_index);
    Label dictionary_found(this, &var_name_index);
    NameDictionaryLookup<NameDictionary>(properties, name, &dictionary_found,
                                         &var_name_index, notfound);

    BIND(&dictionary_found);
    TNode<IntPtrT> key_index = var_name_index.value();
    TNode<Uint32T> details =
        LoadDetailsByKeyIndex<NameDictionary>(properties, key_index);
    GotoIf(IsSetWord32(details, PropertyDetails::kAttributesDontDeleteMask),
           dont_delete);
    // Overwrite the entry itself (see NameDictionary::SetEntry).
    TNode<HeapObject> filler = TheHoleConstant();
    DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kTheHoleValue));
    StoreFixedArrayElement(properties, key_index, filler, SKIP_WRITE_BARRIER);
    StoreValueByKeyIndex<NameDictionary>(properties, key_index, filler,
                                         SKIP_WRITE_BARRIER);
    StoreDetailsByKeyIndex<NameDictionary>(properties, key_index,
                                           SmiConstant(0));

    // Update bookkeeping information (see NameDictionary::ElementRemoved).
    TNode<Smi> nof = GetNumberOfElements<NameDictionary>(properties);
    TNode<Smi> new_nof = SmiSub(nof, SmiConstant(1));
    SetNumberOfElements<NameDictionary>(properties, new_nof);
    TNode<Smi> num_deleted =
        GetNumberOfDeletedElements<NameDictionary>(properties);
    TNode<Smi> new_deleted = SmiAdd(num_deleted, SmiConstant(1));
    SetNumberOfDeletedElements<NameDictionary>(properties, new_deleted);

    // Shrink the dictionary if necessary (see NameDictionary::Shrink).
    Label shrinking_done(this);
    TNode<Smi> capacity = GetCapacity<NameDictionary>(properties);
    GotoIf(SmiGreaterThan(new_nof, SmiShr(capacity, 2)), &shrinking_done);
    GotoIf(SmiLessThan(new_nof, SmiConstant(16)), &shrinking_done);
    CallRuntime(Runtime::kShrinkPropertyDictionary, context, receiver);
    Goto(&shrinking_done);
    BIND(&shrinking_done);

    Return(TrueConstant());
  }
};

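// Implements the `delete` operator: handles dictionary-mode receivers with
// unique-name keys inline, and tail-calls the DeleteProperty runtime function
// for everything else (integer indices, fast-mode receivers, proxies, ...).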
TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
  TNode<Object> receiver = CAST(Parameter(Descriptor::kObject));
  TNode<Object> key = CAST(Parameter(Descriptor::kKey));
  TNode<Smi> language_mode = CAST(Parameter(Descriptor::kLanguageMode));
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));

  VARIABLE(var_index, MachineType::PointerRepresentation());
  VARIABLE(var_unique, MachineRepresentation::kTagged, key);
  Label if_index(this), if_unique_name(this), if_notunique(this),
      if_notfound(this), slow(this);

  GotoIf(TaggedIsSmi(receiver), &slow);
  TNode<Map> receiver_map = LoadMap(CAST(receiver));
  TNode<Int32T> instance_type = LoadMapInstanceType(receiver_map);
  GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &slow);
  TryToName(key, &if_index, &var_index, &if_unique_name, &var_unique, &slow,
            &if_notunique);

  BIND(&if_index);
  {
    Comment("integer index");
    Goto(&slow);  // TODO(jkummerow): Implement more smarts here.
  }

  BIND(&if_unique_name);
  {
    Comment("key is unique name");
    TNode<Name> unique = CAST(var_unique.value());
    CheckForAssociatedProtector(unique, &slow);

    Label dictionary(this), dont_delete(this);
    GotoIf(IsDictionaryMap(receiver_map), &dictionary);

    // Fast properties need to clear recorded slots, which can only be done
    // in C++.
    Goto(&slow);

    BIND(&dictionary);
    {
      InvalidateValidityCellIfPrototype(receiver_map);

      TNode<NameDictionary> properties =
          CAST(LoadSlowProperties(CAST(receiver)));
      DeleteDictionaryProperty(receiver, properties, unique, context,
                               &dont_delete, &if_notfound);
    }

    BIND(&dont_delete);
    {
      STATIC_ASSERT(LanguageModeSize == 2);
      GotoIf(SmiNotEqual(language_mode, SmiConstant(LanguageMode::kSloppy)),
             &slow);
      Return(FalseConstant());
    }
  }

  BIND(&if_notunique);
  {
    // If the string was not found in the string table, then no object can
    // have a property with that name.
    TryInternalizeString(key, &if_index, &var_index, &if_unique_name,
                         &var_unique, &if_notfound, &slow);
  }

  BIND(&if_notfound);
  Return(TrueConstant());

  BIND(&slow);
  {
    TailCallRuntime(Runtime::kDeleteProperty, context, receiver, key,
                    language_mode);
  }
}

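// Prepares for-in iteration over {receiver}: returns its map when the enum
// cache can be used, an empty fixed array when there is nothing to enumerate,
// and otherwise defers to the ForInEnumerate runtime function.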
TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
  Node* receiver = Parameter(Descriptor::kReceiver);
  Node* context = Parameter(Descriptor::kContext);

  Label if_empty(this), if_runtime(this, Label::kDeferred);
  Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
  Return(receiver_map);

  BIND(&if_empty);
  Return(EmptyFixedArrayConstant());

  BIND(&if_runtime);
  TailCallRuntime(Runtime::kForInEnumerate, context, receiver);
}

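// Filters a {key} during for-in iteration: returns {key} if {object} still
// has the property, or undefined if it has been deleted in the meantime.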
TF_BUILTIN(ForInFilter, CodeStubAssembler) {
  Node* key = Parameter(Descriptor::kKey);
  Node* object = Parameter(Descriptor::kObject);
  Node* context = Parameter(Descriptor::kContext);

  CSA_ASSERT(this, IsString(key));

  Label if_true(this), if_false(this);
  TNode<Oddball> result = HasProperty(context, object, key, kForInHasProperty);
  Branch(IsTrue(result), &if_true, &if_false);

  BIND(&if_true);
  Return(key);

  BIND(&if_false);
  Return(UndefinedConstant());
}

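// ES6 SameValue operation: like strict equality, except that NaN equals NaN
// and +0 does not equal -0.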
TF_BUILTIN(SameValue, CodeStubAssembler) {
  Node* lhs = Parameter(Descriptor::kLeft);
  Node* rhs = Parameter(Descriptor::kRight);

  Label if_true(this), if_false(this);
  BranchIfSameValue(lhs, rhs, &if_true, &if_false);

  BIND(&if_true);
  Return(TrueConstant());

  BIND(&if_false);
  Return(FalseConstant());
}

class InternalBuiltinsAssembler : public CodeStubAssembler {
 public:
  explicit InternalBuiltinsAssembler(compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

  template <typename Descriptor>
  void GenerateAdaptorWithExitFrameType(
      Builtins::ExitFrameType exit_frame_type);
};

template <typename Descriptor>
void InternalBuiltinsAssembler::GenerateAdaptorWithExitFrameType(
    Builtins::ExitFrameType exit_frame_type) {
  TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
  TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
  TNode<WordT> c_function =
      UncheckedCast<WordT>(Parameter(Descriptor::kCFunction));

  // The logic contained here is mirrored for TurboFan inlining in
  // JSTypedLowering::ReduceJSCall{Function,Construct}. Keep these in sync.

  // Make sure we operate in the context of the called function (for example
  // ConstructStubs implemented in C++ will be run in the context of the caller
  // instead of the callee, due to the way that [[Construct]] is defined for
  // ordinary functions).
  TNode<Context> context =
      CAST(LoadObjectField(target, JSFunction::kContextOffset));

  // Update arguments count for CEntry to contain the number of arguments
  // including the receiver and the extra arguments.
  TNode<Int32T> argc =
      UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
  argc = Int32Add(
      argc,
      Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));

  TNode<Code> code = HeapConstant(
      CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
                          exit_frame_type == Builtins::BUILTIN_EXIT));

  // Unconditionally push argc, target and new target as extra stack arguments.
  // They will be used by stack frame iterators when constructing stack traces.
  TailCallStub(CEntry1ArgvOnStackDescriptor{},  // descriptor
               code, context,       // standard arguments for TailCallStub
               argc, c_function,    // register arguments
               TheHoleConstant(),   // additional stack argument 1 (padding)
               SmiFromInt32(argc),  // additional stack argument 2
               target,              // additional stack argument 3
               new_target);         // additional stack argument 4
}

TF_BUILTIN(AdaptorWithExitFrame, InternalBuiltinsAssembler) {
  GenerateAdaptorWithExitFrameType<Descriptor>(Builtins::EXIT);
}

TF_BUILTIN(AdaptorWithBuiltinExitFrame, InternalBuiltinsAssembler) {
  GenerateAdaptorWithExitFrameType<Descriptor>(Builtins::BUILTIN_EXIT);
}

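// Slow-path allocation stubs; presumably reached when inline allocation in
// generated code fails. They simply defer to the corresponding runtime
// functions.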
TF_BUILTIN(AllocateInNewSpace, CodeStubAssembler) {
  TNode<IntPtrT> requested_size =
      UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));

  TailCallRuntime(Runtime::kAllocateInNewSpace, NoContextConstant(),
                  SmiFromIntPtr(requested_size));
}

TF_BUILTIN(AllocateInOldSpace, CodeStubAssembler) {
  TNode<IntPtrT> requested_size =
      UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));

  int flags = AllocateTargetSpace::encode(OLD_SPACE);
  TailCallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
                  SmiFromIntPtr(requested_size), SmiConstant(flags));
}

TF_BUILTIN(Abort, CodeStubAssembler) {
  TNode<Smi> message_id = CAST(Parameter(Descriptor::kMessageOrMessageId));
  TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id);
}

TF_BUILTIN(AbortJS, CodeStubAssembler) {
  TNode<String> message = CAST(Parameter(Descriptor::kMessageOrMessageId));
  TailCallRuntime(Runtime::kAbortJS, NoContextConstant(), message);
}

void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvOnStack, false);
}

void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvOnStack, true);
}

void Builtins::
    Generate_CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
        MacroAssembler* masm) {
  Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvInRegister, false);
}

void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, kSaveFPRegs, kArgvOnStack, false);
}

void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 1, kSaveFPRegs, kArgvOnStack, true);
}

void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvOnStack, false);
}

void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvOnStack, true);
}

void Builtins::
    Generate_CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
        MacroAssembler* masm) {
  Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvInRegister, false);
}

void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, false);
}

void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
    MacroAssembler* masm) {
  Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, true);
}

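// On architectures without a hand-written implementation, install the Illegal
// builtin as a placeholder that must never be reached.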
#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
  masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
#endif  // !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)

#ifndef V8_TARGET_ARCH_ARM
void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) {
  masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
#endif  // V8_TARGET_ARCH_ARM

#ifndef V8_TARGET_ARCH_IA32
void Builtins::Generate_MemMove(MacroAssembler* masm) {
  masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
}
#endif  // V8_TARGET_ARCH_IA32

// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
  Node* object = Parameter(Descriptor::kObject);
  Node* key = Parameter(Descriptor::kKey);
  Node* context = Parameter(Descriptor::kContext);
  Label if_notfound(this), if_proxy(this, Label::kDeferred),
      if_slow(this, Label::kDeferred);

  CodeStubAssembler::LookupInHolder lookup_property_in_holder =
      [=](Node* receiver, Node* holder, Node* holder_map,
          Node* holder_instance_type, Node* unique_name, Label* next_holder,
          Label* if_bailout) {
        VARIABLE(var_value, MachineRepresentation::kTagged);
        Label if_found(this);
        TryGetOwnProperty(context, receiver, holder, holder_map,
                          holder_instance_type, unique_name, &if_found,
                          &var_value, next_holder, if_bailout);
        BIND(&if_found);
        Return(var_value.value());
      };

  CodeStubAssembler::LookupInHolder lookup_element_in_holder =
      [=](Node* receiver, Node* holder, Node* holder_map,
          Node* holder_instance_type, Node* index, Label* next_holder,
          Label* if_bailout) {
        // Not supported yet.
        Use(next_holder);
        Goto(if_bailout);
      };

  TryPrototypeChainLookup(object, key, lookup_property_in_holder,
                          lookup_element_in_holder, &if_notfound, &if_slow,
                          &if_proxy);

  BIND(&if_notfound);
  Return(UndefinedConstant());

  BIND(&if_slow);
  TailCallRuntime(Runtime::kGetProperty, context, object, key);

  BIND(&if_proxy);
  {
    // Convert the {key} to a Name first.
    Node* name = CallBuiltin(Builtins::kToName, context, key);

    // The {object} is a JSProxy instance, look up the {name} on it, passing
    // {object} both as receiver and holder. If {name} is absent we can safely
    // return undefined from here.
    TailCallBuiltin(Builtins::kProxyGetProperty, context, object, name, object,
                    SmiConstant(OnNonExistent::kReturnUndefined));
  }
}

// ES6 [[Set]] operation.
TF_BUILTIN(SetProperty, CodeStubAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
  TNode<Object> key = CAST(Parameter(Descriptor::kKey));
  TNode<Object> value = CAST(Parameter(Descriptor::kValue));

  KeyedStoreGenericGenerator::SetProperty(state(), context, receiver, key,
                                          value, LanguageMode::kStrict);
}

// ES6 CreateDataProperty(), specialized for the case where objects are still
// being initialized, and have not yet been made accessible to the user. Thus,
// any operation here should be unobservable until after the object has been
// returned.
TF_BUILTIN(SetPropertyInLiteral, CodeStubAssembler) {
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
  TNode<Object> key = CAST(Parameter(Descriptor::kKey));
  TNode<Object> value = CAST(Parameter(Descriptor::kValue));

  KeyedStoreGenericGenerator::SetPropertyInLiteral(state(), context, receiver,
                                                   key, value);
}

}  // namespace internal
}  // namespace v8