builtins.cc 57.4 KB
Newer Older
1
// Copyright 2012 the V8 project authors. All rights reserved.
2 3
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4

5
#include "src/v8.h"
6

7 8
#include "src/api.h"
#include "src/arguments.h"
9
#include "src/base/once.h"
10 11 12 13 14
#include "src/bootstrapper.h"
#include "src/builtins.h"
#include "src/cpu-profiler.h"
#include "src/gdb-jit.h"
#include "src/heap-profiler.h"
15
#include "src/ic-inl.h"
16 17 18
#include "src/mark-compact.h"
#include "src/stub-cache.h"
#include "src/vm-state-inl.h"
19

20 21
namespace v8 {
namespace internal {
22

23 24 25 26 27 28
namespace {

// Arguments object passed to C++ builtins.
template <BuiltinExtraArguments extra_args>
class BuiltinArguments : public Arguments {
 public:
  BuiltinArguments(int length, Object** arguments)
      : Arguments(length, arguments) { }

  // Bounds-checked access to the raw argument slot at |index|.
  Object*& operator[] (int index) {
    ASSERT(index < length());
    return Arguments::operator[](index);
  }

  // Typed handle to the argument at |index|.
  template <class S> Handle<S> at(int index) {
    ASSERT(index < length());
    return Arguments::at<S>(index);
  }

  // The receiver is always the first argument.
  Handle<Object> receiver() {
    return Arguments::at<Object>(0);
  }

  // The called function occupies the last slot; only valid for builtins
  // declared with NEEDS_CALLED_FUNCTION (enforced at compile time).
  Handle<JSFunction> called_function() {
    STATIC_ASSERT(extra_args == NEEDS_CALLED_FUNCTION);
    return Arguments::at<JSFunction>(Arguments::length() - 1);
  }

  // Gets the total number of arguments including the receiver (but
  // excluding extra arguments).  Specialized below for
  // NEEDS_CALLED_FUNCTION to exclude the trailing function slot.
  int length() const {
    STATIC_ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
    return Arguments::length();
  }

#ifdef DEBUG
  void Verify() {
    // Check we have at least the receiver.
    ASSERT(Arguments::length() >= 1);
  }
#endif
};


// Specialize BuiltinArguments for the called function extra argument.

// Specialization: the trailing called-function slot is not a regular
// argument, so it is excluded from the reported length.
template <>
int BuiltinArguments<NEEDS_CALLED_FUNCTION>::length() const {
  return Arguments::length() - 1;
}

#ifdef DEBUG
// Debug-only sanity check for builtins that carry the called function.
template <>
void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
  // Check we have at least the receiver and the called function.
  ASSERT(Arguments::length() >= 2);
  // Make sure cast to JSFunction succeeds.
  called_function();
}
#endif


// Instantiate a BuiltinArguments typedef (name##ArgumentsType) for each
// C++ builtin, carrying that builtin's extra-argument specification.
#define DEF_ARG_TYPE(name, spec)                      \
  typedef BuiltinArguments<spec> name##ArgumentsType;
BUILTIN_LIST_C(DEF_ARG_TYPE)
#undef DEF_ARG_TYPE

}  // namespace

92
// ----------------------------------------------------------------------------
93
// Support macro for defining builtins in C++.
94 95 96 97
// ----------------------------------------------------------------------------
//
// A builtin function is defined by writing:
//
98
//   BUILTIN(name) {
99 100 101
//     ...
//   }
//
102 103
// In the body of the builtin function the arguments can be accessed
// through the BuiltinArguments object args.
104

105
#ifdef DEBUG

// Debug build: Builtin_##name wraps the incoming raw arguments, runs
// args.Verify(), then dispatches to the implementation body declared as
// Builtin_Impl_##name.
#define BUILTIN(name)                                            \
  MUST_USE_RESULT static Object* Builtin_Impl_##name(            \
      name##ArgumentsType args, Isolate* isolate);               \
  MUST_USE_RESULT static Object* Builtin_##name(                 \
      int args_length, Object** args_object, Isolate* isolate) { \
    name##ArgumentsType args(args_length, args_object);          \
    args.Verify();                                               \
    return Builtin_Impl_##name(args, isolate);                   \
  }                                                              \
  MUST_USE_RESULT static Object* Builtin_Impl_##name(            \
      name##ArgumentsType args, Isolate* isolate)

#else  // For release mode.

// Release build: same wrapper without the Verify() call.  Note the
// distinct Builtin_impl##name spelling in this variant.
#define BUILTIN(name)                                            \
  static Object* Builtin_impl##name(                             \
      name##ArgumentsType args, Isolate* isolate);               \
  static Object* Builtin_##name(                                 \
      int args_length, Object** args_object, Isolate* isolate) { \
    name##ArgumentsType args(args_length, args_object);          \
    return Builtin_impl##name(args, isolate);                    \
  }                                                              \
  static Object* Builtin_impl##name(                             \
      name##ArgumentsType args, Isolate* isolate)

#endif
132 133


134
#ifdef DEBUG
135
static inline bool CalledAsConstructor(Isolate* isolate) {
136 137 138
  // Calculate the result using a full stack frame iterator and check
  // that the state of the stack is as we assume it to be in the
  // code below.
139
  StackFrameIterator it(isolate);
140 141 142
  ASSERT(it.frame()->is_exit());
  it.Advance();
  StackFrame* frame = it.frame();
143
  bool reference_result = frame->is_construct();
144
  Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
145 146 147 148 149 150 151 152 153 154 155 156 157 158
  // Because we know fp points to an exit frame we can use the relevant
  // part of ExitFrame::ComputeCallerState directly.
  const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
  Address caller_fp = Memory::Address_at(fp + kCallerOffset);
  // This inlines the part of StackFrame::ComputeType that grabs the
  // type of the current frame.  Note that StackFrame::ComputeType
  // has been specialized for each architecture so if any one of them
  // changes this code has to be changed as well.
  const int kMarkerOffset = StandardFrameConstants::kMarkerOffset;
  const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
  Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
  bool result = (marker == kConstructMarker);
  ASSERT_EQ(result, reference_result);
  return result;
159
}
160
#endif
161

162

163 164
// ----------------------------------------------------------------------------

165
// Trap builtin: must never actually be executed.
BUILTIN(Illegal) {
  UNREACHABLE();
  return isolate->heap()->undefined_value();  // Make compiler happy.
}


171
// The empty function: does nothing and returns undefined.
BUILTIN(EmptyFunction) {
  return isolate->heap()->undefined_value();
}


176 177
// Moves |len| double elements from |src| at |src_index| to |dst| at
// |dst_index|.  The ranges may overlap (MemMove semantics); a
// zero-length request is a no-op.
static void MoveDoubleElements(FixedDoubleArray* dst, int dst_index,
                               FixedDoubleArray* src, int src_index, int len) {
  if (len > 0) {
    MemMove(dst->data_start() + dst_index, src->data_start() + src_index,
            len * kDoubleSize);
  }
}


// Removes the first |to_trim| elements of |elms| without copying the
// remaining elements: the array's map/length header is re-written
// |to_trim| entries further into the object and the vacated prefix is
// turned into a filler object.  Returns the relocated array.
static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
                                          FixedArrayBase* elms,
                                          int to_trim) {
  ASSERT(heap->CanMoveObjectStart(elms));

  // Entry size depends on the backing store: tagged pointers for
  // FixedArray, raw doubles for FixedDoubleArray.
  Map* map = elms->map();
  int entry_size;
  if (elms->IsFixedArray()) {
    entry_size = kPointerSize;
  } else {
    entry_size = kDoubleSize;
  }
  ASSERT(elms->map() != heap->fixed_cow_array_map());
  // For now this trick is only applied to fixed arrays in new and paged space.
  // In large object space the object's start must coincide with chunk
  // and thus the trick is just not applicable.
  ASSERT(!heap->lo_space()->Contains(elms));

  STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
  STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);

  Object** former_start = HeapObject::RawField(elms, 0);

  const int len = elms->length();

  if (to_trim * entry_size > FixedArrayBase::kHeaderSize &&
      elms->IsFixedArray() &&
      !heap->new_space()->Contains(elms)) {
    // If we are doing a big trim in old space then we zap the space that was
    // formerly part of the array so that the GC (aided by the card-based
    // remembered set) won't find pointers to new-space there.
    Object** zap = reinterpret_cast<Object**>(elms->address());
    zap++;  // Header of filler must be at least one word so skip that.
    for (int i = 1; i < to_trim; i++) {
      *zap++ = Smi::FromInt(0);
    }
  }
  // Technically in new space this write might be omitted (except for
  // debug mode which iterates through the heap), but to play safer
  // we still do it.
  // Since left trimming is only performed on pages which are not concurrently
  // swept creating a filler object does not require synchronization.
  heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);

  // Re-write the map and the shortened length at the array's new start.
  int new_start_index = to_trim * (entry_size / kPointerSize);
  former_start[new_start_index] = map;
  former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);

  // Maintain marking consistency for HeapObjectIterator and
  // IncrementalMarking.
  int size_delta = to_trim * entry_size;
  Address new_start = elms->address() + size_delta;
  heap->marking()->TransferMark(elms->address(), new_start);
  heap->AdjustLiveBytes(new_start, -size_delta, Heap::FROM_MUTATOR);

  FixedArrayBase* new_elms =
      FixedArrayBase::cast(HeapObject::FromAddress(new_start));

  heap->OnMoveEvent(new_elms, elms, new_elms->Size());
  return new_elms;
}


248
static bool ArrayPrototypeHasNoElements(Heap* heap,
249
                                        Context* native_context,
250
                                        JSObject* array_proto) {
251
  DisallowHeapAllocation no_gc;
252 253
  // This method depends on non writability of Object and Array prototype
  // fields.
254
  if (array_proto->elements() != heap->empty_fixed_array()) return false;
255
  // Object.prototype
256
  Object* proto = array_proto->GetPrototype();
257
  if (proto == heap->null_value()) return false;
258
  array_proto = JSObject::cast(proto);
259
  if (array_proto != native_context->initial_object_prototype()) return false;
260
  if (array_proto->elements() != heap->empty_fixed_array()) return false;
261
  return array_proto->GetPrototype()->IsNull();
262 263 264
}


265
// Returns the receiver's elements backing store when the receiver is a
// JSArray eligible for the C++ fast path: writable fast elements, not
// observed, extensible.  If |args| is non-NULL, the arguments starting
// at |first_added_arg| are about to be stored into the array, and the
// elements kind is transitioned as needed to hold them.
// Returns empty handle if not applicable.
MUST_USE_RESULT
static inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
    Isolate* isolate,
    Handle<Object> receiver,
    Arguments* args,
    int first_added_arg) {
  if (!receiver->IsJSArray()) return MaybeHandle<FixedArrayBase>();
  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
  // If there may be elements accessors in the prototype chain, the fast path
  // cannot be used if there are arguments to add to the array.
  if (args != NULL && array->map()->DictionaryElementsInPrototypeChainOnly()) {
    return MaybeHandle<FixedArrayBase>();
  }
  if (array->map()->is_observed()) return MaybeHandle<FixedArrayBase>();
  if (!array->map()->is_extensible()) return MaybeHandle<FixedArrayBase>();
  Handle<FixedArrayBase> elms(array->elements(), isolate);
  Heap* heap = isolate->heap();
  Map* map = elms->map();
  if (map == heap->fixed_array_map()) {
    if (args == NULL || array->HasFastObjectElements()) return elms;
  } else if (map == heap->fixed_cow_array_map()) {
    // Copy-on-write backing store: make a writable copy first.
    elms = JSObject::EnsureWritableFastElements(array);
    if (args == NULL || array->HasFastObjectElements()) return elms;
  } else if (map == heap->fixed_double_array_map()) {
    if (args == NULL) return elms;
  } else {
    return MaybeHandle<FixedArrayBase>();
  }

  // Need to ensure that the arguments passed in args can be contained in
  // the array.
  int args_length = args->length();
  if (first_added_arg >= args_length) return handle(array->elements(), isolate);

  ElementsKind origin_kind = array->map()->elements_kind();
  ASSERT(!IsFastObjectElementsKind(origin_kind));
  ElementsKind target_kind = origin_kind;
  {
    DisallowHeapAllocation no_gc;
    // Scan the incoming values to find the most general elements kind
    // required: any heap number forces DOUBLE, any other heap object
    // forces FAST_ELEMENTS (and ends the scan).
    // NOTE(review): the negative offset arithmetic below assumes the
    // stack layout used by Arguments -- confirm against Arguments.
    int arg_count = args->length() - first_added_arg;
    Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
    for (int i = 0; i < arg_count; i++) {
      Object* arg = arguments[i];
      if (arg->IsHeapObject()) {
        if (arg->IsHeapNumber()) {
          target_kind = FAST_DOUBLE_ELEMENTS;
        } else {
          target_kind = FAST_ELEMENTS;
          break;
        }
      }
    }
  }
  if (target_kind != origin_kind) {
    JSObject::TransitionElementsKind(array, target_kind);
    return handle(array->elements(), isolate);
  }
  return elms;
}


327 328
// Returns true when |receiver|'s elements may be rearranged in place:
// the clever-optimizations flag is on, the receiver's prototype is the
// unmodified Array.prototype, and the prototype chain has no elements.
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
                                                     JSArray* receiver) {
  if (!FLAG_clever_optimizations) return false;
  DisallowHeapAllocation no_gc;
  Context* native_context = heap->isolate()->context()->native_context();
  JSObject* array_proto =
      JSObject::cast(native_context->array_function()->prototype());
  return receiver->GetPrototype() == array_proto &&
         ArrayPrototypeHasNoElements(heap, native_context, array_proto);
}


339
// Re-dispatches a builtin invocation to its JavaScript implementation,
// looked up as property |name| on the native context's builtins object.
// Used as the common slow path when a C++ fast path bails out.
MUST_USE_RESULT static Object* CallJsBuiltin(
    Isolate* isolate,
    const char* name,
    BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
  HandleScope handleScope(isolate);

  Handle<Object> js_builtin = Object::GetProperty(
      isolate,
      handle(isolate->native_context()->builtins(), isolate),
      name).ToHandleChecked();
  Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
  // Forward all arguments except the receiver (slot 0), which is
  // passed to Execution::Call separately.
  int argc = args.length() - 1;
  ScopedVector<Handle<Object> > argv(argc);
  for (int i = 0; i < argc; ++i) {
    argv[i] = args.at<Object>(i + 1);
  }
  Handle<Object> result;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, result,
      Execution::Call(isolate,
                      function,
                      args.receiver(),
                      argc,
                      argv.start()));
  return *result;
}


367
// Array.prototype.push fast path.  Appends the arguments to a
// fast-elements JSArray and returns the new length; bails out to the
// JS ArrayPush builtin when the fast path does not apply (exotic
// elements, read-only length, ...).
BUILTIN(ArrayPush) {
  HandleScope scope(isolate);
  Handle<Object> receiver = args.receiver();
  MaybeHandle<FixedArrayBase> maybe_elms_obj =
      EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
  Handle<FixedArrayBase> elms_obj;
  if (!maybe_elms_obj.ToHandle(&elms_obj)) {
    return CallJsBuiltin(isolate, "ArrayPush", args);
  }

  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
  int len = Smi::cast(array->length())->value();
  int to_add = args.length() - 1;
  if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
    return CallJsBuiltin(isolate, "ArrayPush", args);
  }
  ASSERT(!array->map()->is_observed());

  ElementsKind kind = array->GetElementsKind();

  if (IsFastSmiOrObjectElementsKind(kind)) {
    // Tagged-elements path.
    Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
    if (to_add == 0) {
      return Smi::FromInt(len);
    }
    // Currently fixed arrays cannot grow too big, so
    // we should never hit this case.
    ASSERT(to_add <= (Smi::kMaxValue - len));

    int new_length = len + to_add;

    if (new_length > elms->length()) {
      // New backing storage is needed.  Grow by ~1.5x plus slack.
      int capacity = new_length + (new_length >> 1) + 16;
      Handle<FixedArray> new_elms =
          isolate->factory()->NewUninitializedFixedArray(capacity);

      ElementsAccessor* accessor = array->GetElementsAccessor();
      accessor->CopyElements(
          elms_obj, 0, kind, new_elms, 0,
          ElementsAccessor::kCopyToEndAndInitializeToHole);

      elms = new_elms;
    }

    // Add the provided values.
    DisallowHeapAllocation no_gc;
    WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
    for (int index = 0; index < to_add; index++) {
      elms->set(index + len, args[index + 1], mode);
    }

    if (*elms != array->elements()) {
      array->set_elements(*elms);
    }

    // Set the length.
    array->set_length(Smi::FromInt(new_length));
    return Smi::FromInt(new_length);
  } else {
    // Double-elements path.
    int elms_len = elms_obj->length();
    if (to_add == 0) {
      return Smi::FromInt(len);
    }
    // Currently fixed arrays cannot grow too big, so
    // we should never hit this case.
    ASSERT(to_add <= (Smi::kMaxValue - len));

    int new_length = len + to_add;

    Handle<FixedDoubleArray> new_elms;

    if (new_length > elms_len) {
      // New backing storage is needed.
      int capacity = new_length + (new_length >> 1) + 16;
      // Create new backing store; since capacity > 0, we can
      // safely cast to FixedDoubleArray.
      new_elms = Handle<FixedDoubleArray>::cast(
          isolate->factory()->NewFixedDoubleArray(capacity));

      ElementsAccessor* accessor = array->GetElementsAccessor();
      accessor->CopyElements(
          elms_obj, 0, kind, new_elms, 0,
          ElementsAccessor::kCopyToEndAndInitializeToHole);

    } else {
      // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
      // empty_fixed_array.
      new_elms = Handle<FixedDoubleArray>::cast(elms_obj);
    }

    // Add the provided values.
    DisallowHeapAllocation no_gc;
    int index;
    for (index = 0; index < to_add; index++) {
      Object* arg = args[index + 1];
      new_elms->set(index + len, arg->Number());
    }

    if (*new_elms != array->elements()) {
      array->set_elements(*new_elms);
    }

    // Set the length.
    array->set_length(Smi::FromInt(new_length));
    return Smi::FromInt(new_length);
  }
}


477
// Array.prototype.pop fast path.  Removes and returns the last element
// of a fast-elements JSArray.  Bails out to the JS builtin when the
// fast path does not apply, or when the last element is the hole.
BUILTIN(ArrayPop) {
  HandleScope scope(isolate);
  Handle<Object> receiver = args.receiver();
  MaybeHandle<FixedArrayBase> maybe_elms_obj =
      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
  Handle<FixedArrayBase> elms_obj;
  if (!maybe_elms_obj.ToHandle(&elms_obj)) {
    return CallJsBuiltin(isolate, "ArrayPop", args);
  }

  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
  ASSERT(!array->map()->is_observed());

  // Popping an empty array yields undefined.
  int len = Smi::cast(array->length())->value();
  if (len == 0) return isolate->heap()->undefined_value();

  ElementsAccessor* accessor = array->GetElementsAccessor();
  int new_length = len - 1;
  Handle<Object> element =
      accessor->Get(array, array, new_length, elms_obj).ToHandleChecked();
  if (element->IsTheHole()) {
    // A hole needs the full JS semantics (prototype chain lookup).
    return CallJsBuiltin(isolate, "ArrayPop", args);
  }
  RETURN_FAILURE_ON_EXCEPTION(
      isolate,
      accessor->SetLength(array, handle(Smi::FromInt(new_length), isolate)));
  return *element;
}


507
// Array.prototype.shift fast path.  Removes and returns the first
// element, either by moving the backing store's start in memory (left
// trim) or by shifting the elements down one slot.  Bails out to the
// JS builtin when the fast path does not apply.
BUILTIN(ArrayShift) {
  HandleScope scope(isolate);
  Heap* heap = isolate->heap();
  Handle<Object> receiver = args.receiver();
  MaybeHandle<FixedArrayBase> maybe_elms_obj =
      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
  Handle<FixedArrayBase> elms_obj;
  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
      !IsJSArrayFastElementMovingAllowed(heap,
                                         *Handle<JSArray>::cast(receiver))) {
    return CallJsBuiltin(isolate, "ArrayShift", args);
  }
  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
  ASSERT(!array->map()->is_observed());

  int len = Smi::cast(array->length())->value();
  if (len == 0) return heap->undefined_value();

  // Get first element
  ElementsAccessor* accessor = array->GetElementsAccessor();
  Handle<Object> first =
    accessor->Get(array, array, 0, elms_obj).ToHandleChecked();
  if (first->IsTheHole()) {
    // A hole needs the full JS semantics (prototype chain lookup).
    return CallJsBuiltin(isolate, "ArrayShift", args);
  }

  if (heap->CanMoveObjectStart(*elms_obj)) {
    // Fast case: relocate the array start past element 0, no copying.
    array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
  } else {
    // Shift the elements.
    if (elms_obj->IsFixedArray()) {
      Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
      DisallowHeapAllocation no_gc;
      heap->MoveElements(*elms, 0, 1, len - 1);
      elms->set(len - 1, heap->the_hole_value());
    } else {
      Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
      MoveDoubleElements(*elms, 0, *elms, 1, len - 1);
      elms->set_the_hole(len - 1);
    }
  }

  // Set the length.
  array->set_length(Smi::FromInt(len - 1));

  return *first;
}


556
// Array.prototype.unshift fast path.  Prepends the arguments to a fast
// smi/object-elements JSArray and returns the new length.  Bails out
// to the JS builtin for other elements kinds, observed arrays, or a
// read-only length.
BUILTIN(ArrayUnshift) {
  HandleScope scope(isolate);
  Heap* heap = isolate->heap();
  Handle<Object> receiver = args.receiver();
  MaybeHandle<FixedArrayBase> maybe_elms_obj =
      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
  Handle<FixedArrayBase> elms_obj;
  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
      !IsJSArrayFastElementMovingAllowed(heap,
                                         *Handle<JSArray>::cast(receiver))) {
    return CallJsBuiltin(isolate, "ArrayUnshift", args);
  }
  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
  ASSERT(!array->map()->is_observed());
  if (!array->HasFastSmiOrObjectElements()) {
    return CallJsBuiltin(isolate, "ArrayUnshift", args);
  }
  int len = Smi::cast(array->length())->value();
  int to_add = args.length() - 1;
  int new_length = len + to_add;
  // Currently fixed arrays cannot grow too big, so
  // we should never hit this case.
  ASSERT(to_add <= (Smi::kMaxValue - len));

  if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
    return CallJsBuiltin(isolate, "ArrayUnshift", args);
  }

  Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);

  JSObject::EnsureCanContainElements(array, &args, 1, to_add,
                                     DONT_ALLOW_DOUBLE_ELEMENTS);

  if (new_length > elms->length()) {
    // New backing storage is needed.  Copy the old elements to their
    // new positions, offset by |to_add|, holing the tail.
    int capacity = new_length + (new_length >> 1) + 16;
    Handle<FixedArray> new_elms =
        isolate->factory()->NewUninitializedFixedArray(capacity);

    ElementsKind kind = array->GetElementsKind();
    ElementsAccessor* accessor = array->GetElementsAccessor();
    accessor->CopyElements(
        elms, 0, kind, new_elms, to_add,
        ElementsAccessor::kCopyToEndAndInitializeToHole);

    elms = new_elms;
    array->set_elements(*elms);
  } else {
    // Enough capacity: shift the existing elements up in place.
    DisallowHeapAllocation no_gc;
    heap->MoveElements(*elms, to_add, 0, len);
  }

  // Add the provided values.
  DisallowHeapAllocation no_gc;
  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < to_add; i++) {
    elms->set(i, args[i + 1], mode);
  }

  // Set the length.
  array->set_length(Smi::FromInt(new_length));
  return Smi::FromInt(new_length);
}


621
// Array.prototype.slice fast path.  Handles fast-elements JSArrays and
// sloppy arguments objects with fast elements; everything else falls
// back to the JS builtin.  (Comment typos fixed: "chosen", ECMA-262.)
BUILTIN(ArraySlice) {
  HandleScope scope(isolate);
  Heap* heap = isolate->heap();
  Handle<Object> receiver = args.receiver();
  int len = -1;
  int relative_start = 0;
  int relative_end = 0;
  {
    DisallowHeapAllocation no_gc;
    if (receiver->IsJSArray()) {
      JSArray* array = JSArray::cast(*receiver);
      if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
        AllowHeapAllocation allow_allocation;
        return CallJsBuiltin(isolate, "ArraySlice", args);
      }

      if (!array->HasFastElements()) {
        AllowHeapAllocation allow_allocation;
        return CallJsBuiltin(isolate, "ArraySlice", args);
      }

      len = Smi::cast(array->length())->value();
    } else {
      // Array.slice(arguments, ...) is quite a common idiom (notably more
      // than 50% of invocations in Web apps).  Treat it in C++ as well.
      Map* arguments_map = isolate->context()->native_context()->
          sloppy_arguments_boilerplate()->map();

      bool is_arguments_object_with_fast_elements =
          receiver->IsJSObject() &&
          JSObject::cast(*receiver)->map() == arguments_map;
      if (!is_arguments_object_with_fast_elements) {
        AllowHeapAllocation allow_allocation;
        return CallJsBuiltin(isolate, "ArraySlice", args);
      }
      JSObject* object = JSObject::cast(*receiver);

      if (!object->HasFastElements()) {
        AllowHeapAllocation allow_allocation;
        return CallJsBuiltin(isolate, "ArraySlice", args);
      }

      Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
      if (!len_obj->IsSmi()) {
        AllowHeapAllocation allow_allocation;
        return CallJsBuiltin(isolate, "ArraySlice", args);
      }
      len = Smi::cast(len_obj)->value();
      if (len > object->elements()->length()) {
        AllowHeapAllocation allow_allocation;
        return CallJsBuiltin(isolate, "ArraySlice", args);
      }
    }

    ASSERT(len >= 0);
    int n_arguments = args.length() - 1;

    // Note carefully chosen defaults---if argument is missing,
    // it's undefined which gets converted to 0 for relative_start
    // and to len for relative_end.
    relative_start = 0;
    relative_end = len;
    if (n_arguments > 0) {
      Object* arg1 = args[1];
      if (arg1->IsSmi()) {
        relative_start = Smi::cast(arg1)->value();
      } else if (arg1->IsHeapNumber()) {
        double start = HeapNumber::cast(arg1)->value();
        // Out-of-int-range start: defer to the spec-complete JS path.
        if (start < kMinInt || start > kMaxInt) {
          AllowHeapAllocation allow_allocation;
          return CallJsBuiltin(isolate, "ArraySlice", args);
        }
        relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
      } else if (!arg1->IsUndefined()) {
        AllowHeapAllocation allow_allocation;
        return CallJsBuiltin(isolate, "ArraySlice", args);
      }
      if (n_arguments > 1) {
        Object* arg2 = args[2];
        if (arg2->IsSmi()) {
          relative_end = Smi::cast(arg2)->value();
        } else if (arg2->IsHeapNumber()) {
          double end = HeapNumber::cast(arg2)->value();
          if (end < kMinInt || end > kMaxInt) {
            AllowHeapAllocation allow_allocation;
            return CallJsBuiltin(isolate, "ArraySlice", args);
          }
          relative_end = std::isnan(end) ? 0 : static_cast<int>(end);
        } else if (!arg2->IsUndefined()) {
          AllowHeapAllocation allow_allocation;
          return CallJsBuiltin(isolate, "ArraySlice", args);
        }
      }
    }
  }

  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
  int k = (relative_start < 0) ? Max(len + relative_start, 0)
                               : Min(relative_start, len);

  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
  int final = (relative_end < 0) ? Max(len + relative_end, 0)
                                 : Min(relative_end, len);

  // Calculate the length of result array.
  int result_len = Max(final - k, 0);

  Handle<JSObject> object = Handle<JSObject>::cast(receiver);
  Handle<FixedArrayBase> elms(object->elements(), isolate);

  ElementsKind kind = object->GetElementsKind();
  if (IsHoleyElementsKind(kind)) {
    // If the slice contains no holes the result can use the packed
    // representation.
    DisallowHeapAllocation no_gc;
    bool packed = true;
    ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
    for (int i = k; i < final; i++) {
      if (!accessor->HasElement(object, object, i, elms)) {
        packed = false;
        break;
      }
    }
    if (packed) {
      kind = GetPackedElementsKind(kind);
    } else if (!receiver->IsJSArray()) {
      AllowHeapAllocation allow_allocation;
      return CallJsBuiltin(isolate, "ArraySlice", args);
    }
  }

  Handle<JSArray> result_array =
      isolate->factory()->NewJSArray(kind, result_len, result_len);

  DisallowHeapAllocation no_gc;
  if (result_len == 0) return *result_array;

  ElementsAccessor* accessor = object->GetElementsAccessor();
  accessor->CopyElements(
      elms, k, kind, handle(result_array->elements(), isolate), 0, result_len);
  return *result_array;
}


763
BUILTIN(ArraySplice) {
764
  HandleScope scope(isolate);
765
  Heap* heap = isolate->heap();
766
  Handle<Object> receiver = args.receiver();
767
  MaybeHandle<FixedArrayBase> maybe_elms_obj =
768
      EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
769 770
  Handle<FixedArrayBase> elms_obj;
  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
771 772
      !IsJSArrayFastElementMovingAllowed(heap,
                                         *Handle<JSArray>::cast(receiver))) {
773
    return CallJsBuiltin(isolate, "ArraySplice", args);
774
  }
775
  Handle<JSArray> array = Handle<JSArray>::cast(receiver);
776
  ASSERT(!array->map()->is_observed());
777

778 779 780 781
  int len = Smi::cast(array->length())->value();

  int n_arguments = args.length() - 1;

782
  int relative_start = 0;
783
  if (n_arguments > 0) {
784 785
    DisallowHeapAllocation no_gc;
    Object* arg1 = args[1];
786
    if (arg1->IsSmi()) {
787
      relative_start = Smi::cast(arg1)->value();
788
    } else if (arg1->IsHeapNumber()) {
789
      double start = HeapNumber::cast(arg1)->value();
790
      if (start < kMinInt || start > kMaxInt) {
791
        AllowHeapAllocation allow_allocation;
792 793
        return CallJsBuiltin(isolate, "ArraySplice", args);
      }
794
      relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
795
    } else if (!arg1->IsUndefined()) {
796
      AllowHeapAllocation allow_allocation;
797
      return CallJsBuiltin(isolate, "ArraySplice", args);
798
    }
799
  }
800 801
  int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
                                          : Min(relative_start, len);
802 803

  // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
804 805
  // given as a request to delete all the elements from the start.
  // And it differs from the case of undefined delete count.
806 807
  // This does not follow ECMA-262, but we do the same for
  // compatibility.
808 809 810 811 812 813 814
  int actual_delete_count;
  if (n_arguments == 1) {
    ASSERT(len - actual_start >= 0);
    actual_delete_count = len - actual_start;
  } else {
    int value = 0;  // ToInteger(undefined) == 0
    if (n_arguments > 1) {
815
      DisallowHeapAllocation no_gc;
816 817 818 819
      Object* arg2 = args[2];
      if (arg2->IsSmi()) {
        value = Smi::cast(arg2)->value();
      } else {
820
        AllowHeapAllocation allow_allocation;
821
        return CallJsBuiltin(isolate, "ArraySplice", args);
822
      }
823
    }
824
    actual_delete_count = Min(Max(value, 0), len - actual_start);
825 826
  }

827 828 829 830 831 832 833 834 835 836 837
  ElementsKind elements_kind = array->GetElementsKind();

  int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
  int new_length = len - actual_delete_count + item_count;

  // For double mode we do not support changing the length.
  if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
    return CallJsBuiltin(isolate, "ArraySplice", args);
  }

  if (new_length == 0) {
838
    Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
839 840 841
        elms_obj, elements_kind, actual_delete_count);
    array->set_elements(heap->empty_fixed_array());
    array->set_length(Smi::FromInt(0));
842
    return *result;
843 844
  }

845 846 847 848
  Handle<JSArray> result_array =
      isolate->factory()->NewJSArray(elements_kind,
                                     actual_delete_count,
                                     actual_delete_count);
849

850
  if (actual_delete_count > 0) {
851
    DisallowHeapAllocation no_gc;
852
    ElementsAccessor* accessor = array->GetElementsAccessor();
853
    accessor->CopyElements(
854 855
        elms_obj, actual_start, elements_kind,
        handle(result_array->elements(), isolate), 0, actual_delete_count);
856
  }
857

858
  bool elms_changed = false;
859
  if (item_count < actual_delete_count) {
860
    // Shrink the array.
861
    const bool trim_array = !heap->lo_space()->Contains(*elms_obj) &&
862 863 864 865 866
      ((actual_start + item_count) <
          (len - actual_delete_count - actual_start));
    if (trim_array) {
      const int delta = actual_delete_count - item_count;

867
      if (elms_obj->IsFixedDoubleArray()) {
868 869 870
        Handle<FixedDoubleArray> elms =
            Handle<FixedDoubleArray>::cast(elms_obj);
        MoveDoubleElements(*elms, delta, *elms, 0, actual_start);
871
      } else {
872
        Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
873
        DisallowHeapAllocation no_gc;
874
        heap->MoveElements(*elms, delta, 0, actual_start);
875 876
      }

877 878
      if (heap->CanMoveObjectStart(*elms_obj)) {
        // On the fast path we move the start of the object in memory.
879
        elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta), isolate);
880 881 882 883 884 885 886
      } else {
        // This is the slow path. We are going to move the elements to the left
        // by copying them. For trimmed values we store the hole.
        if (elms_obj->IsFixedDoubleArray()) {
          Handle<FixedDoubleArray> elms =
              Handle<FixedDoubleArray>::cast(elms_obj);
          MoveDoubleElements(*elms, 0, *elms, delta, len - delta);
887
          elms->FillWithHoles(len - delta, len);
888 889 890 891
        } else {
          Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
          DisallowHeapAllocation no_gc;
          heap->MoveElements(*elms, 0, delta, len - delta);
892
          elms->FillWithHoles(len - delta, len);
893 894
        }
      }
895
      elms_changed = true;
896
    } else {
897
      if (elms_obj->IsFixedDoubleArray()) {
898 899 900 901
        Handle<FixedDoubleArray> elms =
            Handle<FixedDoubleArray>::cast(elms_obj);
        MoveDoubleElements(*elms, actual_start + item_count,
                           *elms, actual_start + actual_delete_count,
902
                           (len - actual_delete_count - actual_start));
903
        elms->FillWithHoles(new_length, len);
904
      } else {
905
        Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
906
        DisallowHeapAllocation no_gc;
907
        heap->MoveElements(*elms, actual_start + item_count,
908 909
                           actual_start + actual_delete_count,
                           (len - actual_delete_count - actual_start));
910
        elms->FillWithHoles(new_length, len);
911
      }
912
    }
913
  } else if (item_count > actual_delete_count) {
914
    Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
915 916
    // Currently fixed arrays cannot grow too big, so
    // we should never hit this case.
917
    ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
918

919 920 921 922
    // Check if array need to grow.
    if (new_length > elms->length()) {
      // New backing storage is needed.
      int capacity = new_length + (new_length >> 1) + 16;
923 924
      Handle<FixedArray> new_elms =
          isolate->factory()->NewUninitializedFixedArray(capacity);
925

926
      DisallowHeapAllocation no_gc;
927

928 929
      ElementsKind kind = array->GetElementsKind();
      ElementsAccessor* accessor = array->GetElementsAccessor();
930 931
      if (actual_start > 0) {
        // Copy the part before actual_start as is.
932
        accessor->CopyElements(
933
            elms, 0, kind, new_elms, 0, actual_start);
934
      }
935
      accessor->CopyElements(
936
          elms, actual_start + actual_delete_count, kind,
937
          new_elms, actual_start + item_count,
938
          ElementsAccessor::kCopyToEndAndInitializeToHole);
939

940
      elms_obj = new_elms;
941
      elms_changed = true;
942
    } else {
943
      DisallowHeapAllocation no_gc;
944
      heap->MoveElements(*elms, actual_start + item_count,
945 946
                         actual_start + actual_delete_count,
                         (len - actual_delete_count - actual_start));
947 948 949
    }
  }

950
  if (IsFastDoubleElementsKind(elements_kind)) {
951
    Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
952 953 954 955 956 957 958 959 960
    for (int k = actual_start; k < actual_start + item_count; k++) {
      Object* arg = args[3 + k - actual_start];
      if (arg->IsSmi()) {
        elms->set(k, Smi::cast(arg)->value());
      } else {
        elms->set(k, HeapNumber::cast(arg)->value());
      }
    }
  } else {
961
    Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
962
    DisallowHeapAllocation no_gc;
963 964 965 966
    WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
    for (int k = actual_start; k < actual_start + item_count; k++) {
      elms->set(k, args[3 + k - actual_start], mode);
    }
967 968
  }

969
  if (elms_changed) {
970
    array->set_elements(*elms_obj);
971
  }
972 973 974
  // Set the length.
  array->set_length(Smi::FromInt(new_length));

975
  return *result_array;
976 977 978
}


979
// ES5 15.4.4.4 Array.prototype.concat fast path.  Falls back to the
// JavaScript implementation ("ArrayConcatJS") whenever any argument is not a
// plain fast-elements JSArray with an unmodified Array.prototype.
BUILTIN(ArrayConcat) {
  HandleScope scope(isolate);

  int n_arguments = args.length();
  int result_len = 0;
  ElementsKind elements_kind = GetInitialFastElementsKind();
  bool has_double = false;
  {
    // Raw-pointer inspection of the arguments; no allocation may happen here.
    DisallowHeapAllocation no_gc;
    Heap* heap = isolate->heap();
    Context* native_context = isolate->context()->native_context();
    JSObject* array_proto =
        JSObject::cast(native_context->array_function()->prototype());
    if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
      AllowHeapAllocation allow_allocation;
      return CallJsBuiltin(isolate, "ArrayConcatJS", args);
    }

    // Walk the arguments once: validate each one, accumulate the total
    // length, and compute the most general elements kind required.
    bool is_holey = false;
    for (int i = 0; i < n_arguments; i++) {
      Object* arg = args[i];
      if (!arg->IsJSArray() ||
          !JSArray::cast(arg)->HasFastElements() ||
          JSArray::cast(arg)->GetPrototype() != array_proto) {
        AllowHeapAllocation allow_allocation;
        return CallJsBuiltin(isolate, "ArrayConcatJS", args);
      }
      int len = Smi::cast(JSArray::cast(arg)->length())->value();

      // Adding one more length must not overflow a signed int.
      const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
      STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
      USE(kHalfOfMaxInt);
      result_len += len;
      ASSERT(result_len >= 0);

      if (result_len > FixedDoubleArray::kMaxLength) {
        AllowHeapAllocation allow_allocation;
        return CallJsBuiltin(isolate, "ArrayConcatJS", args);
      }

      ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
      has_double = has_double || IsFastDoubleElementsKind(arg_kind);
      is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
      if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
        elements_kind = arg_kind;
      }
    }
    if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
  }

  // If a double array is concatenated into a fast-object elements array, the
  // destination must be pre-initialized with holes: boxing the doubles can
  // trigger incremental marking, which must not observe garbage slots.
  ArrayStorageAllocationMode mode =
      has_double && IsFastObjectElementsKind(elements_kind)
      ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
  Handle<JSArray> result_array =
      isolate->factory()->NewJSArray(elements_kind,
                                     result_len,
                                     result_len,
                                     mode);
  if (result_len == 0) return *result_array;

  // Copy every source array into the freshly allocated backing store.
  int dest_index = 0;
  Handle<FixedArrayBase> storage(result_array->elements(), isolate);
  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
  for (int i = 0; i < n_arguments; i++) {
    // TODO(ishell): It is crucial to keep |array| as a raw pointer to avoid
    // performance degradation. Revisit this later.
    JSArray* array = JSArray::cast(args[i]);
    int len = Smi::cast(array->length())->value();
    ElementsKind from_kind = array->GetElementsKind();
    if (len > 0) {
      accessor->CopyElements(array, 0, from_kind, storage, dest_index, len);
      dest_index += len;
    }
  }

  ASSERT(dest_index == result_len);

  return *result_array;
}



// -----------------------------------------------------------------------------
// Generator and strict mode poison pills


// Thrown when sloppy-only features (e.g. arguments.callee) are accessed from
// strict-mode code.
BUILTIN(StrictModePoisonPill) {
  HandleScope scope(isolate);
  return isolate->Throw(*isolate->factory()->NewTypeError(
      "strict_poison_pill", HandleVector<Object>(NULL, 0)));
}


// Thrown when forbidden members are accessed on generator objects.
BUILTIN(GeneratorPoisonPill) {
  HandleScope scope(isolate);
  return isolate->Throw(*isolate->factory()->NewTypeError(
      "generator_poison_pill", HandleVector<Object>(NULL, 0)));
}


1084 1085 1086 1087
// -----------------------------------------------------------------------------
//


1088 1089 1090 1091 1092 1093
// Searches the hidden prototype chain of the given object for the first
// object that is an instance of the given type.  If no such object can
// be found then Heap::null_value() is returned.
static inline Object* FindHidden(Heap* heap,
                                 Object* object,
                                 FunctionTemplateInfo* type) {
  // Iterative walk: follow hidden prototypes until a match or a
  // non-hidden link terminates the chain.
  Object* current = object;
  while (true) {
    if (type->IsTemplateFor(current)) return current;
    Object* proto = current->GetPrototype(heap->isolate());
    if (!proto->IsJSObject() ||
        !JSObject::cast(proto)->map()->is_hidden_prototype()) {
      return heap->null_value();
    }
    current = proto;
  }
}


1104 1105 1106
// Returns the holder JSObject if the function can legally be called
// with this receiver.  Returns Heap::null_value() if the call is
// illegal.  Any arguments that don't fit the expected type is
1107 1108 1109
// overwritten with undefined.  Note that holder and the arguments are
// implicitly rewritten with the first object in the hidden prototype
// chain that actually has the expected type.
1110 1111
static inline Object* TypeCheck(Heap* heap,
                                int argc,
1112 1113 1114
                                Object** argv,
                                FunctionTemplateInfo* info) {
  Object* recv = argv[0];
1115 1116
  // API calls are only supported with JSObject receivers.
  if (!recv->IsJSObject()) return heap->null_value();
1117 1118 1119 1120 1121 1122 1123
  Object* sig_obj = info->signature();
  if (sig_obj->IsUndefined()) return recv;
  SignatureInfo* sig = SignatureInfo::cast(sig_obj);
  // If necessary, check the receiver
  Object* recv_type = sig->receiver();
  Object* holder = recv;
  if (!recv_type->IsUndefined()) {
1124 1125
    holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
    if (holder == heap->null_value()) return heap->null_value();
1126 1127 1128 1129 1130 1131
  }
  Object* args_obj = sig->args();
  // If there is no argument signature we're done
  if (args_obj->IsUndefined()) return holder;
  FixedArray* args = FixedArray::cast(args_obj);
  int length = args->length();
1132
  if (argc <= length) length = argc - 1;
1133 1134 1135 1136 1137
  for (int i = 0; i < length; i++) {
    Object* argtype = args->get(i);
    if (argtype->IsUndefined()) continue;
    Object** arg = &argv[-1 - i];
    Object* current = *arg;
1138 1139 1140
    current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
    if (current == heap->null_value()) current = heap->undefined_value();
    *arg = current;
1141 1142 1143 1144 1145
  }
  return holder;
}


1146
template <bool is_construct>
1147
MUST_USE_RESULT static Object* HandleApiCallHelper(
1148 1149 1150
    BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
  ASSERT(is_construct == CalledAsConstructor(isolate));
  Heap* heap = isolate->heap();
1151

1152
  HandleScope scope(isolate);
1153
  Handle<JSFunction> function = args.called_function();
1154
  ASSERT(function->shared()->IsApiFunction());
1155

1156 1157
  Handle<FunctionTemplateInfo> fun_data(
      function->shared()->get_api_func_data(), isolate);
1158
  if (is_construct) {
1159 1160 1161 1162
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, fun_data,
        isolate->factory()->ConfigureInstance(
            fun_data, Handle<JSObject>::cast(args.receiver())));
1163 1164
  }

1165
  SharedFunctionInfo* shared = function->shared();
1166
  if (shared->strict_mode() == SLOPPY && !shared->native()) {
1167 1168 1169 1170 1171 1172 1173
    Object* recv = args[0];
    ASSERT(!recv->IsNull());
    if (recv->IsUndefined()) {
      args[0] = function->context()->global_object()->global_receiver();
    }
  }

1174
  Object* raw_holder = TypeCheck(heap, args.length(), &args[0], *fun_data);
1175 1176 1177 1178

  if (raw_holder->IsNull()) {
    // This function cannot be called with the given receiver.  Abort!
    Handle<Object> obj =
1179 1180 1181
        isolate->factory()->NewTypeError(
            "illegal_invocation", HandleVector(&function, 1));
    return isolate->Throw(*obj);
1182 1183 1184 1185 1186 1187
  }

  Object* raw_call_data = fun_data->call_code();
  if (!raw_call_data->IsUndefined()) {
    CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
    Object* callback_obj = call_data->callback();
1188 1189
    v8::FunctionCallback callback =
        v8::ToCData<v8::FunctionCallback>(callback_obj);
1190 1191 1192
    Object* data_obj = call_data->data();
    Object* result;

1193
    LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
1194 1195
    ASSERT(raw_holder->IsJSObject());

1196 1197 1198 1199 1200 1201 1202
    FunctionCallbackArguments custom(isolate,
                                     data_obj,
                                     *function,
                                     raw_holder,
                                     &args[0] - 1,
                                     args.length() - 1,
                                     is_construct);
1203

1204
    v8::Handle<v8::Value> value = custom.Call(callback);
1205
    if (value.IsEmpty()) {
1206
      result = heap->undefined_value();
1207 1208
    } else {
      result = *reinterpret_cast<Object**>(*value);
1209
      result->VerifyApiCallResultType();
1210 1211
    }

1212
    RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
1213 1214 1215
    if (!is_construct || result->IsJSObject()) return result;
  }

1216 1217 1218 1219 1220
  return *args.receiver();
}


// Entry point for normal (non-construct) calls to API functions.
BUILTIN(HandleApiCall) {
  return HandleApiCallHelper<false>(args, isolate);
}


// Entry point for construct calls to API functions.
BUILTIN(HandleApiCallConstruct) {
  return HandleApiCallHelper<true>(args, isolate);
}


1230 1231 1232
// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
1233
MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
1234
    Isolate* isolate,
1235 1236
    bool is_construct_call,
    BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
1237 1238
  // Non-functions are never called as constructors. Even if this is an object
  // called as a constructor the delegate call is not a construct call.
1239 1240
  ASSERT(!CalledAsConstructor(isolate));
  Heap* heap = isolate->heap();
1241

1242
  Handle<Object> receiver = args.receiver();
1243

1244
  // Get the object called.
1245
  JSObject* obj = JSObject::cast(*receiver);
1246 1247 1248 1249 1250

  // Get the invocation callback from the function descriptor that was
  // used to create the called object.
  ASSERT(obj->map()->has_instance_call_handler());
  JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
1251
  ASSERT(constructor->shared()->IsApiFunction());
1252
  Object* handler =
1253
      constructor->shared()->get_api_func_data()->instance_call_handler();
1254 1255 1256
  ASSERT(!handler->IsUndefined());
  CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
  Object* callback_obj = call_data->callback();
1257 1258
  v8::FunctionCallback callback =
      v8::ToCData<v8::FunctionCallback>(callback_obj);
1259 1260 1261

  // Get the data for the call and perform the callback.
  Object* result;
1262
  {
1263 1264
    HandleScope scope(isolate);
    LOG(isolate, ApiObjectAccess("call non-function", obj));
1265

1266 1267 1268 1269 1270 1271 1272
    FunctionCallbackArguments custom(isolate,
                                     call_data->data(),
                                     constructor,
                                     obj,
                                     &args[0] - 1,
                                     args.length() - 1,
                                     is_construct_call);
1273
    v8::Handle<v8::Value> value = custom.Call(callback);
1274
    if (value.IsEmpty()) {
1275
      result = heap->undefined_value();
1276 1277
    } else {
      result = *reinterpret_cast<Object**>(*value);
1278
      result->VerifyApiCallResultType();
1279 1280 1281
    }
  }
  // Check for exceptions and return result.
1282
  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
1283 1284
  return result;
}
1285 1286 1287 1288 1289


// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a normal function call.
BUILTIN(HandleApiCallAsFunction) {
  return HandleApiCallAsFunctionOrConstructor(isolate, false, args);
}


// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a construct call.
BUILTIN(HandleApiCallAsConstructor) {
  return HandleApiCallAsFunctionOrConstructor(isolate, true, args);
}
1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310


static void Generate_LoadIC_Miss(MacroAssembler* masm) {
  LoadIC::GenerateMiss(masm);
}


static void Generate_LoadIC_Normal(MacroAssembler* masm) {
  LoadIC::GenerateNormal(masm);
}


1311
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
1312
  LoadStubCompiler::GenerateLoadViaGetterForDeopt(masm);
1313 1314 1315
}


1316 1317 1318 1319 1320
static void Generate_LoadIC_Slow(MacroAssembler* masm) {
  LoadIC::GenerateRuntimeGetProperty(masm);
}


1321 1322 1323 1324 1325
static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
  KeyedLoadIC::GenerateInitialize(masm);
}


danno@chromium.org's avatar
danno@chromium.org committed
1326 1327 1328 1329 1330
static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
  KeyedLoadIC::GenerateRuntimeGetProperty(masm);
}


1331
static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
1332
  KeyedLoadIC::GenerateMiss(masm);
1333 1334 1335 1336 1337 1338 1339 1340
}


static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
  KeyedLoadIC::GenerateGeneric(masm);
}


1341 1342 1343 1344 1345
static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
  KeyedLoadIC::GenerateString(masm);
}


1346 1347 1348 1349
static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
  KeyedLoadIC::GeneratePreMonomorphic(masm);
}

1350

1351 1352 1353 1354
static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
  KeyedLoadIC::GenerateIndexedInterceptor(masm);
}

1355

1356 1357
static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
  KeyedLoadIC::GenerateSloppyArguments(masm);
1358
}
1359

1360

1361 1362 1363 1364 1365
static void Generate_StoreIC_Slow(MacroAssembler* masm) {
  StoreIC::GenerateSlow(masm);
}


1366 1367 1368 1369 1370
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
  StoreIC::GenerateMiss(masm);
}


1371 1372 1373 1374 1375
static void Generate_StoreIC_Normal(MacroAssembler* masm) {
  StoreIC::GenerateNormal(masm);
}


1376
static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
1377
  StoreStubCompiler::GenerateStoreViaSetterForDeopt(masm);
1378 1379 1380
}


1381
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
1382
  KeyedStoreIC::GenerateGeneric(masm, SLOPPY);
1383 1384 1385 1386
}


static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
1387
  KeyedStoreIC::GenerateGeneric(masm, STRICT);
1388 1389 1390 1391
}


static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
1392
  KeyedStoreIC::GenerateMiss(masm);
danno@chromium.org's avatar
danno@chromium.org committed
1393 1394 1395 1396 1397
}


static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
  KeyedStoreIC::GenerateSlow(masm);
1398 1399 1400 1401 1402 1403 1404 1405
}


static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
  KeyedStoreIC::GenerateInitialize(masm);
}


1406 1407 1408 1409
static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
  KeyedStoreIC::GenerateInitialize(masm);
}

1410

1411 1412 1413 1414 1415 1416 1417 1418 1419 1420
static void Generate_KeyedStoreIC_PreMonomorphic(MacroAssembler* masm) {
  KeyedStoreIC::GeneratePreMonomorphic(masm);
}


static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
  KeyedStoreIC::GeneratePreMonomorphic(masm);
}


1421 1422
static void Generate_KeyedStoreIC_SloppyArguments(MacroAssembler* masm) {
  KeyedStoreIC::GenerateSloppyArguments(masm);
1423
}
1424

1425

1426
static void Generate_CallICStub_DebugBreak(MacroAssembler* masm) {
1427
  DebugCodegen::GenerateCallICStubDebugBreak(masm);
1428 1429 1430
}


1431
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
1432
  DebugCodegen::GenerateLoadICDebugBreak(masm);
1433 1434 1435 1436
}


static void Generate_StoreIC_DebugBreak(MacroAssembler* masm) {
1437
  DebugCodegen::GenerateStoreICDebugBreak(masm);
1438 1439 1440 1441
}


static void Generate_KeyedLoadIC_DebugBreak(MacroAssembler* masm) {
1442
  DebugCodegen::GenerateKeyedLoadICDebugBreak(masm);
1443 1444 1445 1446
}


static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
1447
  DebugCodegen::GenerateKeyedStoreICDebugBreak(masm);
1448 1449 1450
}


1451
static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) {
1452
  DebugCodegen::GenerateCompareNilICDebugBreak(masm);
1453 1454 1455
}


1456
static void Generate_Return_DebugBreak(MacroAssembler* masm) {
1457
  DebugCodegen::GenerateReturnDebugBreak(masm);
1458 1459 1460
}


1461
static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
1462
  DebugCodegen::GenerateCallFunctionStubDebugBreak(masm);
1463
}
1464

1465

1466
static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) {
1467
  DebugCodegen::GenerateCallConstructStubDebugBreak(masm);
1468 1469 1470 1471 1472
}


static void Generate_CallConstructStub_Recording_DebugBreak(
    MacroAssembler* masm) {
1473
  DebugCodegen::GenerateCallConstructStubRecordDebugBreak(masm);
1474 1475 1476
}


1477
static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
1478
  DebugCodegen::GenerateSlotDebugBreak(masm);
1479 1480 1481
}


1482
static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
1483
  DebugCodegen::GeneratePlainReturnLiveEdit(masm);
1484 1485
}

1486

1487
static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
1488
  DebugCodegen::GenerateFrameDropperLiveEdit(masm);
1489
}
1490

1491 1492 1493 1494 1495 1496 1497 1498 1499 1500

// Start from a fully zeroed state; the real entries are filled in by SetUp()
// (or by deserialization via IterateBuiltins()).
Builtins::Builtins() : initialized_(false) {
  memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
  memset(names_, 0, sizeof(names_[0]) * builtin_count);
}


Builtins::~Builtins() {
}

1501

1502
#define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
1503 1504 1505
Address const Builtins::c_functions_[cfunction_count] = {
  BUILTIN_LIST_C(DEF_ENUM_C)
};
1506 1507 1508 1509
#undef DEF_ENUM_C

#define DEF_JS_NAME(name, ignore) #name,
#define DEF_JS_ARGC(ignore, argc) argc,
1510
const char* const Builtins::javascript_names_[id_count] = {
1511 1512 1513
  BUILTINS_LIST_JS(DEF_JS_NAME)
};

1514
int const Builtins::javascript_argc_[id_count] = {
1515 1516 1517 1518 1519
  BUILTINS_LIST_JS(DEF_JS_ARGC)
};
#undef DEF_JS_NAME
#undef DEF_JS_ARGC

1520 1521 1522 1523 1524 1525 1526 1527
struct BuiltinDesc {
  byte* generator;
  byte* c_code;
  const char* s_name;  // name is only used for generating log information.
  int name;
  Code::Flags flags;
  BuiltinExtraArguments extra_args;
};
1528

1529 1530
#define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }

1531 1532
class BuiltinFunctionTable {
 public:
1533
  BuiltinDesc* functions() {
1534
    base::CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
1535
    return functions_;
1536 1537
  }

1538
  base::OnceType once_;
1539
  BuiltinDesc functions_[Builtins::builtin_count + 1];
1540 1541 1542

  friend class Builtins;
};
1543

1544 1545
static BuiltinFunctionTable builtin_function_table =
    BUILTIN_FUNCTION_TABLE_INIT;
1546 1547 1548 1549 1550 1551

// Define array of pointers to generators and C builtin functions.
// We do this in a sort of roundabout way so that we can do the initialization
// within the lexical scope of Builtins:: and within a context where
// Code::Flags names a non-abstract type.
void Builtins::InitBuiltinFunctionTable() {
1552
  BuiltinDesc* functions = builtin_function_table.functions_;
1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572
  functions[builtin_count].generator = NULL;
  functions[builtin_count].c_code = NULL;
  functions[builtin_count].s_name = NULL;
  functions[builtin_count].name = builtin_count;
  functions[builtin_count].flags = static_cast<Code::Flags>(0);
  functions[builtin_count].extra_args = NO_EXTRA_ARGUMENTS;

#define DEF_FUNCTION_PTR_C(aname, aextra_args)                         \
    functions->generator = FUNCTION_ADDR(Generate_Adaptor);            \
    functions->c_code = FUNCTION_ADDR(Builtin_##aname);                \
    functions->s_name = #aname;                                        \
    functions->name = c_##aname;                                       \
    functions->flags = Code::ComputeFlags(Code::BUILTIN);              \
    functions->extra_args = aextra_args;                               \
    ++functions;

#define DEF_FUNCTION_PTR_A(aname, kind, state, extra)                       \
    functions->generator = FUNCTION_ADDR(Generate_##aname);                 \
    functions->c_code = NULL;                                               \
    functions->s_name = #aname;                                             \
1573
    functions->name = k##aname;                                             \
1574 1575 1576 1577 1578 1579
    functions->flags = Code::ComputeFlags(Code::kind,                       \
                                          state,                            \
                                          extra);                           \
    functions->extra_args = NO_EXTRA_ARGUMENTS;                             \
    ++functions;

1580
#define DEF_FUNCTION_PTR_H(aname, kind)                                     \
1581 1582 1583 1584
    functions->generator = FUNCTION_ADDR(Generate_##aname);                 \
    functions->c_code = NULL;                                               \
    functions->s_name = #aname;                                             \
    functions->name = k##aname;                                             \
1585
    functions->flags = Code::ComputeHandlerFlags(Code::kind);               \
1586 1587 1588
    functions->extra_args = NO_EXTRA_ARGUMENTS;                             \
    ++functions;

1589 1590
  BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
  BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
1591
  BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
1592
  BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
1593 1594 1595

#undef DEF_FUNCTION_PTR_C
#undef DEF_FUNCTION_PTR_A
1596 1597
}

1598

1599
void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
1600 1601 1602
  ASSERT(!initialized_);

  // Create a scope for the handles in the builtins.
1603
  HandleScope scope(isolate);
1604

1605
  const BuiltinDesc* functions = builtin_function_table.functions();
1606 1607

  // For now we generate builtin adaptor code into a stack-allocated
1608 1609
  // buffer, before copying it into individual code objects. Be careful
  // with alignment, some platforms don't like unaligned code.
1610 1611 1612 1613 1614 1615 1616
#ifdef DEBUG
  // We can generate a lot of debug code on Arm64.
  const size_t buffer_size = 32*KB;
#else
  const size_t buffer_size = 8*KB;
#endif
  union { int force_alignment; byte buffer[buffer_size]; } u;
1617 1618 1619 1620 1621

  // Traverse the list of builtins and generate an adaptor in a
  // separate code object for each one.
  for (int i = 0; i < builtin_count; i++) {
    if (create_heap_objects) {
1622
      MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
1623
      // Generate the code/adaptor.
1624
      typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
1625 1626 1627 1628
      Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
      // We pass all arguments to the generator, but it may not use all of
      // them.  This works because the first arguments are on top of the
      // stack.
1629
      ASSERT(!masm.has_frame());
1630
      g(&masm, functions[i].name, functions[i].extra_args);
1631 1632 1633 1634
      // Move the code into the object heap.
      CodeDesc desc;
      masm.GetCode(&desc);
      Code::Flags flags =  functions[i].flags;
1635 1636
      Handle<Code> code =
          isolate->factory()->NewCode(desc, flags, masm.CodeObject());
1637
      // Log the event and add the code to the builtins array.
1638
      PROFILE(isolate,
1639 1640 1641
              CodeCreateEvent(Logger::BUILTIN_TAG, *code, functions[i].s_name));
      GDBJIT(AddCode(GDBJITInterface::BUILTIN, functions[i].s_name, *code));
      builtins_[i] = *code;
1642
#ifdef ENABLE_DISASSEMBLER
1643
      if (FLAG_print_builtin_code) {
1644 1645
        CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
        PrintF(trace_scope.file(), "Builtin: %s\n", functions[i].s_name);
1646
        code->Disassemble(functions[i].s_name, trace_scope.file());
1647
        PrintF(trace_scope.file(), "\n");
1648 1649
      }
#endif
    } else {
      // Deserializing. The values will be filled in during IterateBuiltins.
      builtins_[i] = NULL;
    }
    names_[i] = functions[i].s_name;
  }

  // Mark as initialized.
  initialized_ = true;
}


void Builtins::TearDown() {
1663
  initialized_ = false;
1664 1665 1666 1667 1668 1669 1670 1671 1672
}


void Builtins::IterateBuiltins(ObjectVisitor* v) {
  // Expose every slot of the builtins table to the visitor so the GC can
  // update the code pointers when objects move.
  Object** first = &builtins_[0];
  v->VisitPointers(first, first + builtin_count);
}


const char* Builtins::Lookup(byte* pc) {
  // Returns the name of the builtin whose generated code contains pc, or
  // NULL if pc does not fall inside any builtin.
  //
  // May be called during initialization (e.g. by the disassembler) before
  // the table is populated; in that case report no match rather than
  // dereferencing uninitialized builtins_ entries.
  if (initialized_) {
    for (int i = 0; i < builtin_count; i++) {
      Code* entry = Code::cast(builtins_[i]);
      if (entry->contains(pc)) {
        return names_[i];
      }
    }
  }
  return NULL;
}


void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
  // Tail call into the runtime to service a pending interrupt request; the
  // runtime call returns directly to this builtin's caller.
  masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
}


void Builtins::Generate_StackCheck(MacroAssembler* masm) {
  // Tail call into the runtime's stack guard; the runtime call returns
  // directly to this builtin's caller.
  masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
}


#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore)               \
Handle<Code> Builtins::name() {                               \
  Code** code_address =                                       \
      reinterpret_cast<Code**>(builtin_address(k##name));     \
  return Handle<Code>(code_address);                          \
}
#define DEFINE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> Builtins::name() {                             \
  Code** code_address =                                     \
      reinterpret_cast<Code**>(builtin_address(k##name));   \
  return Handle<Code>(code_address);                        \
}
1708
#define DEFINE_BUILTIN_ACCESSOR_H(name, kind)               \
1709 1710 1711 1712 1713
Handle<Code> Builtins::name() {                             \
  Code** code_address =                                     \
      reinterpret_cast<Code**>(builtin_address(k##name));   \
  return Handle<Code>(code_address);                        \
}
1714 1715
BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
1716
BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
1717 1718 1719 1720 1721
BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
#undef DEFINE_BUILTIN_ACCESSOR_C
#undef DEFINE_BUILTIN_ACCESSOR_A


} }  // namespace v8::internal