// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/builtins/builtins-constructor-gen.h"

#include "src/ast/ast.h"
#include "src/builtins/builtins-call-gen.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/counters.h"
#include "src/interface-descriptors.h"
#include "src/objects-inl.h"

namespace v8 {
namespace internal {

void Builtins::Generate_ConstructVarargs(MacroAssembler* masm) {
  Generate_CallOrConstructVarargs(masm,
                                  masm->isolate()->builtins()->Construct());
}

void Builtins::Generate_ConstructForwardVarargs(MacroAssembler* masm) {
  Generate_CallOrConstructForwardVarargs(
      masm, masm->isolate()->builtins()->Construct());
}

void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
  Generate_CallOrConstructForwardVarargs(
      masm, masm->isolate()->builtins()->ConstructFunction());
}

TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
  Node* target = Parameter(ConstructWithArrayLikeDescriptor::kTarget);
  Node* new_target = Parameter(ConstructWithArrayLikeDescriptor::kNewTarget);
  Node* arguments_list =
      Parameter(ConstructWithArrayLikeDescriptor::kArgumentsList);
  Node* context = Parameter(ConstructWithArrayLikeDescriptor::kContext);
  CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}

typedef compiler::Node Node;

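// Makes a shallow copy of a FixedArray or FixedDoubleArray and preserves the
// source array's map (and therefore its elements kind) on the copy.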
Node* ConstructorBuiltinsAssembler::CopyFixedArrayBase(Node* fixed_array) {
  Label if_fixed_array(this), if_fixed_double_array(this), done(this);
  VARIABLE(result, MachineRepresentation::kTagged);
  Node* capacity = LoadAndUntagFixedArrayBaseLength(fixed_array);
  Branch(IsFixedDoubleArrayMap(LoadMap(fixed_array)), &if_fixed_double_array,
         &if_fixed_array);
  BIND(&if_fixed_double_array);
  {
    ElementsKind kind = FAST_DOUBLE_ELEMENTS;
    Node* copy = AllocateFixedArray(kind, capacity);
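    // Double elements are raw float64 values, so the copy needs no write
    // barrier.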
    CopyFixedArrayElements(kind, fixed_array, kind, copy, capacity, capacity,
                           SKIP_WRITE_BARRIER);
    result.Bind(copy);
    Goto(&done);
  }

  BIND(&if_fixed_array);
  {
    ElementsKind kind = FAST_ELEMENTS;
    Node* copy = AllocateFixedArray(kind, capacity);
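    // Tagged elements may reference other heap objects, so keep the write
    // barrier.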
    CopyFixedArrayElements(kind, fixed_array, kind, copy, capacity, capacity,
                           UPDATE_WRITE_BARRIER);
    result.Bind(copy);
    Goto(&done);
  }
  BIND(&done);
  // Manually copy over the map of the incoming array to preserve the elements
  // kind.
  StoreMap(result.value(), LoadMap(fixed_array));
  return result.value();
}

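// Allocates a new JSFunction for {shared_info} in new space. The function map
// is selected from the native context based on the function kind and language
// mode, the feedback cell's closure counter is bumped, and the code entry is
// set to the CompileLazy builtin.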
Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
                                                       Node* feedback_vector,
                                                       Node* slot,
                                                       Node* context) {
  Isolate* isolate = this->isolate();
  Factory* factory = isolate->factory();
  IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);

  // Create a new closure from the given function info in new space.
  Node* result = Allocate(JSFunction::kSize);

  // Calculate the index of the map we should install on the function based on
  // the FunctionKind and LanguageMode of the function.
  // Note: Must be kept in sync with Context::FunctionMapIndex
  Node* compiler_hints =
      LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
                      MachineType::Uint32());
  Node* is_strict = Word32And(
      compiler_hints, Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));

  Label if_normal(this), if_generator(this), if_async(this),
      if_class_constructor(this), if_function_without_prototype(this),
      load_map(this);
  VARIABLE(map_index, MachineType::PointerRepresentation());

  STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
  Node* is_not_normal =
      Word32And(compiler_hints,
                Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
  GotoIfNot(is_not_normal, &if_normal);

  Node* is_generator = Word32And(
      compiler_hints, Int32Constant(FunctionKind::kGeneratorFunction
                                    << SharedFunctionInfo::kFunctionKindShift));
  GotoIf(is_generator, &if_generator);

  Node* is_async = Word32And(
      compiler_hints, Int32Constant(FunctionKind::kAsyncFunction
                                    << SharedFunctionInfo::kFunctionKindShift));
  GotoIf(is_async, &if_async);

  Node* is_class_constructor = Word32And(
      compiler_hints, Int32Constant(FunctionKind::kClassConstructor
                                    << SharedFunctionInfo::kFunctionKindShift));
  GotoIf(is_class_constructor, &if_class_constructor);

  if (FLAG_debug_code) {
    // The function must be a kind that has no prototype: an accessor, arrow
    // function, or concise method.
    CSA_ASSERT(
        this,
        Word32And(compiler_hints,
                  Int32Constant((FunctionKind::kAccessorFunction |
                                 FunctionKind::kArrowFunction |
                                 FunctionKind::kConciseMethod)
                                << SharedFunctionInfo::kFunctionKindShift)));
  }
  Goto(&if_function_without_prototype);

  BIND(&if_normal);
  {
    map_index.Bind(SelectIntPtrConstant(is_strict,
                                        Context::STRICT_FUNCTION_MAP_INDEX,
                                        Context::SLOPPY_FUNCTION_MAP_INDEX));
    Goto(&load_map);
  }

  BIND(&if_generator);
  {
    Node* is_async =
        Word32And(compiler_hints,
                  Int32Constant(FunctionKind::kAsyncFunction
                                << SharedFunctionInfo::kFunctionKindShift));
    map_index.Bind(SelectIntPtrConstant(
        is_async, Context::ASYNC_GENERATOR_FUNCTION_MAP_INDEX,
        Context::GENERATOR_FUNCTION_MAP_INDEX));
    Goto(&load_map);
  }

  BIND(&if_async);
  {
    map_index.Bind(IntPtrConstant(Context::ASYNC_FUNCTION_MAP_INDEX));
    Goto(&load_map);
  }

  BIND(&if_class_constructor);
  {
    map_index.Bind(IntPtrConstant(Context::CLASS_FUNCTION_MAP_INDEX));
    Goto(&load_map);
  }

  BIND(&if_function_without_prototype);
  {
    map_index.Bind(
        IntPtrConstant(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
    Goto(&load_map);
  }

  BIND(&load_map);

  // Get the function map in the current native context and set that
  // as the map of the allocated object.
  Node* native_context = LoadNativeContext(context);
  Node* map_slot_value =
      LoadFixedArrayElement(native_context, map_index.value());
  StoreMapNoWriteBarrier(result, map_slot_value);

  // Initialize the rest of the function.
  Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
  StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
                                 empty_fixed_array);
  StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
                                 empty_fixed_array);
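  // Load the feedback cell for this closure; its map encodes how many
  // closures have already been created from this slot.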
  Node* literals_cell = LoadFixedArrayElement(
      feedback_vector, slot, 0, CodeStubAssembler::SMI_PARAMETERS);
  {
    // Bump the closure counter encoded in the cell's map.
    Node* cell_map = LoadMap(literals_cell);
    Label no_closures(this), one_closure(this), cell_done(this);

    GotoIf(IsNoClosuresCellMap(cell_map), &no_closures);
    GotoIf(IsOneClosureCellMap(cell_map), &one_closure);
    CSA_ASSERT(this, IsManyClosuresCellMap(cell_map));
    Goto(&cell_done);

    BIND(&no_closures);
    StoreMapNoWriteBarrier(literals_cell, Heap::kOneClosureCellMapRootIndex);
    Goto(&cell_done);

    BIND(&one_closure);
    StoreMapNoWriteBarrier(literals_cell, Heap::kManyClosuresCellMapRootIndex);
    Goto(&cell_done);

    BIND(&cell_done);
  }
  {
    // If the feedback vector has optimized code, check whether it is marked
    // for deopt and, if so, clear it.
    Label optimized_code_ok(this);
    Node* literals = LoadObjectField(literals_cell, Cell::kValueOffset);
    GotoIfNot(IsFeedbackVector(literals), &optimized_code_ok);
    Node* optimized_code_cell =
        LoadFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex);
    Node* optimized_code =
        LoadWeakCellValue(optimized_code_cell, &optimized_code_ok);
    Node* code_flags = LoadObjectField(
        optimized_code, Code::kKindSpecificFlags1Offset, MachineType::Uint32());
    Node* marked_for_deopt =
        DecodeWord32<Code::MarkedForDeoptimizationField>(code_flags);
    GotoIf(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok);

    // Code is marked for deopt, clear the optimized code slot.
    StoreFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex,
                           EmptyWeakCellConstant(), SKIP_WRITE_BARRIER);
    Goto(&optimized_code_ok);

    BIND(&optimized_code_ok);
  }
  StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
                                 literals_cell);
  StoreObjectFieldNoWriteBarrier(
      result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
  StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
                                 shared_info);
  StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
  Handle<Code> lazy_builtin_handle(
      isolate->builtins()->builtin(Builtins::kCompileLazy));
  Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
  Node* lazy_builtin_entry =
      IntPtrAdd(BitcastTaggedToWord(lazy_builtin),
                IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
  StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeEntryOffset,
                                 lazy_builtin_entry,
                                 MachineType::PointerRepresentation());
  StoreObjectFieldNoWriteBarrier(result, JSFunction::kNextFunctionLinkOffset,
                                 UndefinedConstant());

  return result;
}

TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
  Node* shared = Parameter(FastNewClosureDescriptor::kSharedFunctionInfo);
  Node* context = Parameter(FastNewClosureDescriptor::kContext);
  Node* vector = Parameter(FastNewClosureDescriptor::kVector);
  Node* slot = Parameter(FastNewClosureDescriptor::kSlot);
  Return(EmitFastNewClosure(shared, vector, slot, context));
}

TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
  Node* context = Parameter(Descriptor::kContext);
  Node* target = Parameter(Descriptor::kTarget);
  Node* new_target = Parameter(Descriptor::kNewTarget);

  Label call_runtime(this);

  Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
  Return(result);

  BIND(&call_runtime);
  TailCallRuntime(Runtime::kNewObject, context, target, new_target);
}

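// Convenience wrapper around the label-taking variant below; instead of
// bailing out to a label it calls the Runtime::kNewObject fallback itself.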
Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
                                                      Node* target,
                                                      Node* new_target) {
  VARIABLE(var_obj, MachineRepresentation::kTagged);
  Label call_runtime(this), end(this);

  Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
  var_obj.Bind(result);
  Goto(&end);

  BIND(&call_runtime);
  var_obj.Bind(CallRuntime(Runtime::kNewObject, context, target, new_target));
  Goto(&end);

  BIND(&end);
  return var_obj.value();
}

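// Allocates the result object from {new_target}'s initial map. Bails out to
// {call_runtime} if {new_target} is not a JSFunction, its initial map is not
// a proper Map, or {target} is not that map's constructor.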
Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
                                                      Node* target,
                                                      Node* new_target,
                                                      Label* call_runtime) {
  CSA_ASSERT(this, HasInstanceType(target, JS_FUNCTION_TYPE));
  CSA_ASSERT(this, IsJSReceiver(new_target));

  // Verify that the new target is a JSFunction.
  Label fast(this);
  GotoIf(HasInstanceType(new_target, JS_FUNCTION_TYPE), &fast);
  Goto(call_runtime);

  BIND(&fast);

  // Load the initial map and verify that it's in fact a map.
  Node* initial_map =
      LoadObjectField(new_target, JSFunction::kPrototypeOrInitialMapOffset);
  GotoIf(TaggedIsSmi(initial_map), call_runtime);
  GotoIf(DoesntHaveInstanceType(initial_map, MAP_TYPE), call_runtime);

  // Fall back to runtime if the target differs from the new target's
  // initial map constructor.
  Node* new_target_constructor =
      LoadObjectField(initial_map, Map::kConstructorOrBackPointerOffset);
  GotoIf(WordNotEqual(target, new_target_constructor), call_runtime);

  VARIABLE(properties, MachineRepresentation::kTagged);

  Label instantiate_map(this), allocate_properties(this);
  GotoIf(IsDictionaryMap(initial_map), &allocate_properties);
  {
    properties.Bind(EmptyFixedArrayConstant());
    Goto(&instantiate_map);
  }
  BIND(&allocate_properties);
  {
    properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity));
    Goto(&instantiate_map);
  }

  BIND(&instantiate_map);

  Node* object = AllocateJSObjectFromMap(initial_map, properties.value());

  // Perform in-object slack tracking if requested.
  HandleSlackTracking(context, object, initial_map, JSObject::kHeaderSize);
  return object;
}

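// Allocates a function or eval context with Context::MIN_CONTEXT_SLOTS plus
// {slots} slots, initializes the fixed slots, and fills the remaining slots
// with undefined.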
Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
    Node* function, Node* slots, Node* context, ScopeType scope_type) {
  slots = ChangeUint32ToWord(slots);

  // TODO(ishell): Use CSA::OptimalParameterMode() here.
  ParameterMode mode = INTPTR_PARAMETERS;
  Node* min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS);
  Node* length = IntPtrAdd(slots, min_context_slots);
  Node* size = GetFixedArrayAllocationSize(length, FAST_ELEMENTS, mode);

  // Allocate the function context in new space.
  Node* function_context = AllocateInNewSpace(size);

  Heap::RootListIndex context_type;
  switch (scope_type) {
    case EVAL_SCOPE:
      context_type = Heap::kEvalContextMapRootIndex;
      break;
    case FUNCTION_SCOPE:
      context_type = Heap::kFunctionContextMapRootIndex;
      break;
    default:
      UNREACHABLE();
  }
  StoreMapNoWriteBarrier(function_context, context_type);
  StoreObjectFieldNoWriteBarrier(function_context, Context::kLengthOffset,
                                 SmiTag(length));

  // Set up the fixed slots.
  StoreFixedArrayElement(function_context, Context::CLOSURE_INDEX, function,
                         SKIP_WRITE_BARRIER);
  StoreFixedArrayElement(function_context, Context::PREVIOUS_INDEX, context,
                         SKIP_WRITE_BARRIER);
  StoreFixedArrayElement(function_context, Context::EXTENSION_INDEX,
                         TheHoleConstant(), SKIP_WRITE_BARRIER);

  // Copy the native context from the previous context.
  Node* native_context = LoadNativeContext(context);
  StoreFixedArrayElement(function_context, Context::NATIVE_CONTEXT_INDEX,
                         native_context, SKIP_WRITE_BARRIER);

  // Initialize the rest of the slots to undefined.
  Node* undefined = UndefinedConstant();
  BuildFastFixedArrayForEach(
      function_context, FAST_ELEMENTS, min_context_slots, length,
      [this, undefined](Node* context, Node* offset) {
        StoreNoWriteBarrier(MachineRepresentation::kTagged, context, offset,
                            undefined);
      },
      mode);

  return function_context;
}

TF_BUILTIN(FastNewFunctionContextEval, ConstructorBuiltinsAssembler) {
  Node* function = Parameter(FastNewFunctionContextDescriptor::kFunction);
  Node* slots = Parameter(FastNewFunctionContextDescriptor::kSlots);
  Node* context = Parameter(FastNewFunctionContextDescriptor::kContext);
  Return(EmitFastNewFunctionContext(function, slots, context,
                                    ScopeType::EVAL_SCOPE));
}

TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) {
  Node* function = Parameter(FastNewFunctionContextDescriptor::kFunction);
  Node* slots = Parameter(FastNewFunctionContextDescriptor::kSlots);
  Node* context = Parameter(FastNewFunctionContextDescriptor::kContext);
  Return(EmitFastNewFunctionContext(function, slots, context,
                                    ScopeType::FUNCTION_SCOPE));
}

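// Clones the JSRegExp boilerplate cached in the closure's feedback vector.
// Falls back to Runtime::kCreateRegExpLiteral if no boilerplate exists yet
// for this literal slot.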
Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
                                                        Node* literal_index,
                                                        Node* pattern,
                                                        Node* flags,
                                                        Node* context) {
  Label call_runtime(this, Label::kDeferred), end(this);

  VARIABLE(result, MachineRepresentation::kTagged);

  Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
  Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
  Node* boilerplate = LoadFixedArrayElement(feedback_vector, literal_index, 0,
                                            CodeStubAssembler::SMI_PARAMETERS);
  GotoIf(IsUndefined(boilerplate), &call_runtime);

  {
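    // JSRegExp instances have a fixed layout (the JSRegExp header plus a
    // fixed number of in-object fields), so the boilerplate can be copied
    // field by field.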
    int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
    Node* copy = Allocate(size);
    for (int offset = 0; offset < size; offset += kPointerSize) {
      Node* value = LoadObjectField(boilerplate, offset);
      StoreObjectFieldNoWriteBarrier(copy, offset, value);
    }
    result.Bind(copy);
    Goto(&end);
  }

  BIND(&call_runtime);
  {
    result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context, closure,
                            literal_index, pattern, flags));
    Goto(&end);
  }

  BIND(&end);
  return result.value();
}

TF_BUILTIN(FastCloneRegExp, ConstructorBuiltinsAssembler) {
  Node* closure = Parameter(FastCloneRegExpDescriptor::kClosure);
  Node* literal_index = Parameter(FastCloneRegExpDescriptor::kLiteralIndex);
  Node* pattern = Parameter(FastCloneRegExpDescriptor::kPattern);
  Node* flags = Parameter(FastCloneRegExpDescriptor::kFlags);
  Node* context = Parameter(FastCloneRegExpDescriptor::kContext);

  Return(EmitFastCloneRegExp(closure, literal_index, pattern, flags, context));
}

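// Allocates a JSArray with the boilerplate's map and length and copies the
// boilerplate's elements into a freshly allocated backing store.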
Node* ConstructorBuiltinsAssembler::NonEmptyShallowClone(
    Node* boilerplate, Node* boilerplate_map, Node* boilerplate_elements,
    Node* allocation_site, Node* capacity, ElementsKind kind) {
  ParameterMode param_mode = OptimalParameterMode();

  Node* length = LoadJSArrayLength(boilerplate);
  capacity = TaggedToParameter(capacity, param_mode);

  Node *array, *elements;
  std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
      kind, boilerplate_map, length, allocation_site, capacity, param_mode);

  Comment("copy elements header");
  // Header consists of map and length.
  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
  StoreMap(elements, LoadMap(boilerplate_elements));
  {
    int offset = FixedArrayBase::kLengthOffset;
    StoreObjectFieldNoWriteBarrier(
        elements, offset, LoadObjectField(boilerplate_elements, offset));
  }

  length = TaggedToParameter(length, param_mode);

  Comment("copy boilerplate elements");
  CopyFixedArrayElements(kind, boilerplate_elements, elements, length,
                         SKIP_WRITE_BARRIER, param_mode);
  IncrementCounter(isolate()->counters()->inlined_copied_elements(), 1);

  return array;
}

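// Clones the boilerplate array cached in the closure's feedback vector.
// Handles the fast, fast-double, copy-on-write, and zero-capacity elements
// cases inline and bails out to {call_runtime} if no allocation site exists
// yet for this literal slot.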
Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
    Node* closure, Node* literal_index, Node* context, Label* call_runtime,
    AllocationSiteMode allocation_site_mode) {
  Label zero_capacity(this), cow_elements(this), fast_elements(this),
      return_result(this);
  VARIABLE(result, MachineRepresentation::kTagged);

  Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
  Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
  Node* allocation_site = LoadFixedArrayElement(
      feedback_vector, literal_index, 0, CodeStubAssembler::SMI_PARAMETERS);

  GotoIf(IsUndefined(allocation_site), call_runtime);

  Node* boilerplate =
      LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
  Node* boilerplate_map = LoadMap(boilerplate);
  Node* boilerplate_elements = LoadElements(boilerplate);
  Node* capacity = LoadFixedArrayBaseLength(boilerplate_elements);
  allocation_site =
      allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;

  Node* zero = SmiConstant(Smi::kZero);
  GotoIf(SmiEqual(capacity, zero), &zero_capacity);

  Node* elements_map = LoadMap(boilerplate_elements);
  GotoIf(IsFixedCOWArrayMap(elements_map), &cow_elements);

  GotoIf(IsFixedArrayMap(elements_map), &fast_elements);
  {
    Comment("fast double elements path");
    if (FLAG_debug_code) CSA_CHECK(this, IsFixedDoubleArrayMap(elements_map));
    Node* array =
        NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
                             allocation_site, capacity, FAST_DOUBLE_ELEMENTS);
    result.Bind(array);
    Goto(&return_result);
  }

  BIND(&fast_elements);
  {
    Comment("fast elements path");
    Node* array =
        NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
                             allocation_site, capacity, FAST_ELEMENTS);
    result.Bind(array);
    Goto(&return_result);
  }

  VARIABLE(length, MachineRepresentation::kTagged);
  VARIABLE(elements, MachineRepresentation::kTagged);
  Label allocate_without_elements(this);

  BIND(&cow_elements);
  {
    Comment("fixed cow path");
    length.Bind(LoadJSArrayLength(boilerplate));
    elements.Bind(boilerplate_elements);

    Goto(&allocate_without_elements);
  }

  BIND(&zero_capacity);
  {
    Comment("zero capacity path");
    length.Bind(zero);
    elements.Bind(LoadRoot(Heap::kEmptyFixedArrayRootIndex));

    Goto(&allocate_without_elements);
  }

  BIND(&allocate_without_elements);
  {
    Node* array = AllocateUninitializedJSArrayWithoutElements(
        FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
    StoreObjectField(array, JSObject::kElementsOffset, elements.value());
    result.Bind(array);
    Goto(&return_result);
  }

  BIND(&return_result);
  return result.value();
}

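// Shared body of the FastCloneShallowArray{Track,DontTrack} builtins. The
// runtime fallback disables allocation mementos when allocation sites are not
// being tracked.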
void ConstructorBuiltinsAssembler::CreateFastCloneShallowArrayBuiltin(
    AllocationSiteMode allocation_site_mode) {
  Node* closure = Parameter(FastCloneShallowArrayDescriptor::kClosure);
  Node* literal_index =
      Parameter(FastCloneShallowArrayDescriptor::kLiteralIndex);
  Node* constant_elements =
      Parameter(FastCloneShallowArrayDescriptor::kConstantElements);
  Node* context = Parameter(FastCloneShallowArrayDescriptor::kContext);
  Label call_runtime(this, Label::kDeferred);
  Return(EmitFastCloneShallowArray(closure, literal_index, context,
                                   &call_runtime, allocation_site_mode));

  BIND(&call_runtime);
  {
    Comment("call runtime");
    Node* flags =
        SmiConstant(Smi::FromInt(ArrayLiteral::kShallowElements |
                                 (allocation_site_mode == TRACK_ALLOCATION_SITE
                                      ? 0
                                      : ArrayLiteral::kDisableMementos)));
    Return(CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
                       literal_index, constant_elements, flags));
  }
}

TF_BUILTIN(FastCloneShallowArrayTrack, ConstructorBuiltinsAssembler) {
  CreateFastCloneShallowArrayBuiltin(TRACK_ALLOCATION_SITE);
}

TF_BUILTIN(FastCloneShallowArrayDontTrack, ConstructorBuiltinsAssembler) {
  CreateFastCloneShallowArrayBuiltin(DONT_TRACK_ALLOCATION_SITE);
}

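// Clones the boilerplate object cached in the closure's feedback vector:
// copies the properties backing store, the elements backing store, and the
// in-object fields, cloning any mutable HeapNumbers so that the copy does not
// share them with the boilerplate.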
Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
    Label* call_runtime, Node* closure, Node* literals_index) {
  Node* allocation_site;
  {
    // Load the allocation site.
    Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
    Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
    allocation_site = LoadFixedArrayElement(feedback_vector, literals_index, 0,
                                            CodeStubAssembler::SMI_PARAMETERS);
    GotoIf(IsUndefined(allocation_site), call_runtime);
  }

  Node* boilerplate =
      LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
  Node* boilerplate_map = LoadMap(boilerplate);

  VARIABLE(var_properties, MachineRepresentation::kTagged);
  {
    // Directly copy over the property store for dict-mode boilerplates.
    Label if_dictionary(this), if_fast(this), done(this);
    Branch(IsDictionaryMap(boilerplate_map), &if_dictionary, &if_fast);
    BIND(&if_dictionary);
    {
      Comment("Copy dictionary properties");
      var_properties.Bind(
          CopyNameDictionary(LoadProperties(boilerplate), call_runtime));
      // Slow objects have no in-object properties.
      Goto(&done);
    }
    BIND(&if_fast);
    {
      // TODO(cbruni): support copying out-of-object properties.
      Node* boilerplate_properties = LoadProperties(boilerplate);
      GotoIfNot(IsEmptyFixedArray(boilerplate_properties), call_runtime);
      var_properties.Bind(EmptyFixedArrayConstant());
      Goto(&done);
    }
    BIND(&done);
  }

  VARIABLE(var_elements, MachineRepresentation::kTagged);
  {
    // Copy the elements backing store, assuming that it's flat.
    Label if_empty_fixed_array(this), if_copy_elements(this), done(this);
    Node* boilerplate_elements = LoadElements(boilerplate);
    Branch(IsEmptyFixedArray(boilerplate_elements), &if_empty_fixed_array,
           &if_copy_elements);

    BIND(&if_empty_fixed_array);
    var_elements.Bind(boilerplate_elements);
    Goto(&done);

    BIND(&if_copy_elements);
    CSA_ASSERT(this, Word32BinaryNot(
                         IsFixedCOWArrayMap(LoadMap(boilerplate_elements))));
    var_elements.Bind(CopyFixedArrayBase(boilerplate_elements));
    Goto(&done);
    BIND(&done);
  }

  // Ensure new-space allocation for a fresh JSObject so we can skip write
  // barriers when copying all object fields.
  STATIC_ASSERT(JSObject::kMaxInstanceSize < kMaxRegularHeapObjectSize);
  Node* instance_size = TimesPointerSize(LoadMapInstanceSize(boilerplate_map));
  Node* allocation_size = instance_size;
  if (FLAG_allocation_site_pretenuring) {
    // Prepare for inner-allocating the AllocationMemento.
    allocation_size =
        IntPtrAdd(instance_size, IntPtrConstant(AllocationMemento::kSize));
  }

  Node* copy = AllocateInNewSpace(allocation_size);
  {
    Comment("Initialize Literal Copy");
    // Initialize Object fields.
    StoreMapNoWriteBarrier(copy, boilerplate_map);
    StoreObjectFieldNoWriteBarrier(copy, JSObject::kPropertiesOffset,
                                   var_properties.value());
    StoreObjectFieldNoWriteBarrier(copy, JSObject::kElementsOffset,
                                   var_elements.value());
  }

  // Initialize the AllocationMemento before potential GCs due to heap number
  // allocation when copying the in-object properties.
  if (FLAG_allocation_site_pretenuring) {
    Comment("Initialize AllocationMemento");
    Node* memento = InnerAllocate(copy, instance_size);
    StoreMapNoWriteBarrier(memento, Heap::kAllocationMementoMapRootIndex);
    StoreObjectFieldNoWriteBarrier(
        memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
    Node* memento_create_count = LoadObjectField(
        allocation_site, AllocationSite::kPretenureCreateCountOffset);
    memento_create_count =
        SmiAdd(memento_create_count, SmiConstant(Smi::FromInt(1)));
    StoreObjectFieldNoWriteBarrier(allocation_site,
                                   AllocationSite::kPretenureCreateCountOffset,
                                   memento_create_count);
  }

  {
    // Copy over in-object properties.
    Label continue_with_write_barrier(this), done_init(this);
    VARIABLE(offset, MachineType::PointerRepresentation(),
             IntPtrConstant(JSObject::kHeaderSize));
    // Mutable heap numbers occur only when double fields are tracked but not
    // unboxed, i.e. on 32-bit platforms.
    bool may_use_mutable_heap_numbers =
        FLAG_track_double_fields && !FLAG_unbox_double_fields;
    {
      Comment("Copy in-object properties fast");
      Label continue_fast(this, &offset);
      Branch(WordEqual(offset.value(), instance_size), &done_init,
             &continue_fast);
      BIND(&continue_fast);
      Node* field = LoadObjectField(boilerplate, offset.value());
      if (may_use_mutable_heap_numbers) {
        Label store_field(this);
        GotoIf(TaggedIsSmi(field), &store_field);
        GotoIf(IsMutableHeapNumber(field), &continue_with_write_barrier);
        Goto(&store_field);
        BIND(&store_field);
      }
      StoreObjectFieldNoWriteBarrier(copy, offset.value(), field);
      offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
      Branch(WordNotEqual(offset.value(), instance_size), &continue_fast,
             &done_init);
    }

    if (!may_use_mutable_heap_numbers) {
      BIND(&done_init);
      return copy;
    }
    // Continue initializing the literal after seeing the first sub-object
    // whose copy may cause an allocation (a mutable HeapNumber). First copy
    // the remaining fields over from the boilerplate verbatim, then clone the
    // mutable HeapNumber values; those stores use full write barriers because
    // the intervening allocations may have triggered a GC.
    BIND(&continue_with_write_barrier);
    {
      Comment("Copy in-object properties slow");
      BuildFastLoop(offset.value(), instance_size,
                    [=](Node* offset) {
                      Node* field = LoadObjectField(boilerplate, offset);
                      StoreObjectFieldNoWriteBarrier(copy, offset, field);
                    },
                    kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
      Comment("Copy mutable HeapNumber values");
      BuildFastLoop(offset.value(), instance_size,
                    [=](Node* offset) {
                      Node* field = LoadObjectField(copy, offset);
                      Label copy_mutable_heap_number(this, Label::kDeferred),
                          continue_loop(this);
                      // We only have to clone complex field values.
                      GotoIf(TaggedIsSmi(field), &continue_loop);
                      Branch(IsMutableHeapNumber(field),
                             &copy_mutable_heap_number, &continue_loop);
                      BIND(&copy_mutable_heap_number);
                      {
                        Node* double_value = LoadHeapNumberValue(field);
                        Node* mutable_heap_number =
                            AllocateHeapNumberWithValue(double_value, MUTABLE);
                        StoreObjectField(copy, offset, mutable_heap_number);
                        Goto(&continue_loop);
                      }
                      BIND(&continue_loop);
                    },
                    kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
      Goto(&done_init);
    }
    BIND(&done_init);
  }
  return copy;
}

TF_BUILTIN(FastCloneShallowObject, ConstructorBuiltinsAssembler) {
  Label call_runtime(this);
  Node* closure = Parameter(Descriptor::kClosure);
  Node* literals_index = Parameter(Descriptor::kLiteralIndex);
  Node* copy =
      EmitFastCloneShallowObject(&call_runtime, closure, literals_index);
  Return(copy);

  BIND(&call_runtime);
  Node* boilerplate_description =
      Parameter(Descriptor::kBoilerplateDescription);
  Node* flags = Parameter(Descriptor::kFlags);
  Node* context = Parameter(Descriptor::kContext);
  TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
                  literals_index, boilerplate_description, flags);
}

}  // namespace internal
}  // namespace v8