// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
#include "jump-target-light-inl.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
#include "virtual-frame-arm-inl.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm_)

// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

void DeferredCode::SaveRegisters() {
  // On ARM you either have a completely spilled frame or you
  // handle it yourself, but at the moment there's no automation
  // of registers and deferred code.
}


void DeferredCode::RestoreRegisters() {
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  frame_state_->frame()->AssertIsSpilled();
}


void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}


void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterInternalFrame();
}


void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveInternalFrame();
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      previous_(owner->state()) {
  owner->set_state(this);
}


ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
                                             JumpTarget* true_target,
                                             JumpTarget* false_target)
    : CodeGenState(owner),
      true_target_(true_target),
      false_target_(false_target) {
  owner->set_state(this);
}


TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
                                           Slot* slot,
                                           TypeInfo type_info)
    : CodeGenState(owner),
      slot_(slot) {
  owner->set_state(this);
  old_type_info_ = owner->set_type_info(slot, type_info);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


TypeInfoCodeGenState::~TypeInfoCodeGenState() {
  owner()->set_type_info(slot_, old_type_info_);
}

// -------------------------------------------------------------------------
// CodeGenerator implementation

int CodeGenerator::inlined_write_barrier_size_ = -1;

CodeGenerator::CodeGenerator(MacroAssembler* masm)
    : deferred_(8),
      masm_(masm),
      info_(NULL),
      frame_(NULL),
      allocator_(NULL),
      cc_reg_(al),
      state_(NULL),
      loop_nesting_(0),
      type_info_(NULL),
      function_return_(JumpTarget::BIDIRECTIONAL),
      function_return_is_shadowed_(false) {
}


// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context

void CodeGenerator::Generate(CompilationInfo* info) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(info->function());
  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");

  // Initialize state.
  info_ = info;

  int slots = scope()->num_parameters() + scope()->num_stack_slots();
  ScopedVector<TypeInfo> type_info_array(slots);
  type_info_ = &type_info_array;

  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  cc_reg_ = al;

  // Adjust for function-level loop nesting.
  ASSERT_EQ(0, loop_nesting_);
  loop_nesting_ = info->is_in_loop() ? 1 : 0;

  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments
    // lr: return address
    // fp: caller's frame pointer
    // sp: stack pointer
    // r1: called JS function
    // cp: callee's context
    allocator_->Initialize();

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ stop("stop-at");
    }
#endif

    frame_->Enter();
    // tos: code slot

    // Allocate space for locals and initialize them.  This also checks
    // for stack overflow.
    frame_->AllocateStackSlots();

    frame_->AssertIsSpilled();
    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    if (heap_slots > 0) {
      // Allocate local context.
      // Get outer context and create a new context based on it.
      __ ldr(r0, frame_->Function());
      frame_->EmitPush(r0);
      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
        FastNewContextStub stub(heap_slots);
        frame_->CallStub(&stub, 1);
      } else {
        frame_->CallRuntime(Runtime::kNewContext, 1);
      }

#ifdef DEBUG
      JumpTarget verified_true;
      __ cmp(r0, cp);
      verified_true.Branch(eq);
      __ stop("NewContext: r0 is expected to be the same as cp");
      verified_true.Bind();
#endif
      // Update context local.
      __ str(cp, frame_->Context());
    }

    // TODO(1241774): Improve this code:
    // 1) only needed if we have a context
    // 2) no need to recompute context ptr every single time
    // 3) don't copy parameter operand code from SlotOperand!
    {
      Comment cmnt2(masm_, "[ copy context parameters into .context");
      // Note that iteration order is relevant here! If we have the same
      // parameter twice (e.g., function (x, y, x)), and that parameter
      // needs to be copied into the context, it must be the last argument
      // passed to the parameter that needs to be copied. This is a rare
      // case so we don't check for it, instead we rely on the copying
      // order: such a parameter is copied repeatedly into the same
      // context location and thus the last value is what is seen inside
      // the function.
      frame_->AssertIsSpilled();
      for (int i = 0; i < scope()->num_parameters(); i++) {
        Variable* par = scope()->parameter(i);
        Slot* slot = par->AsSlot();
        if (slot != NULL && slot->type() == Slot::CONTEXT) {
          ASSERT(!scope()->is_global_scope());  // No params in global scope.
          __ ldr(r1, frame_->ParameterAt(i));
          // Loads r2 with context; used below in RecordWrite.
          __ str(r1, SlotOperand(slot, r2));
          // Load the offset into r3.
          int slot_offset =
              FixedArray::kHeaderSize + slot->index() * kPointerSize;
          __ RecordWrite(r2, Operand(slot_offset), r3, r1);
        }
      }
    }

    // Store the arguments object.  This must happen after context
    // initialization because the arguments object may be stored in
    // the context.
    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
      StoreArgumentsObject(true);
    }

    // Initialize ThisFunction reference if present.
    if (scope()->is_function_scope() && scope()->function() != NULL) {
      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
    }

    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.SetExpectedHeight();
    function_return_is_shadowed_ = false;

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope()->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope()->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatements(info->function()->body());
    }
  }

  // Handle the return from the function.
  if (has_valid_frame()) {
    // If there is a valid frame, control flow can fall off the end of
    // the body.  In that case there is an implicit return statement.
    ASSERT(!function_return_is_shadowed_);
    frame_->PrepareForReturn();
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
    if (function_return_.is_bound()) {
      function_return_.Jump();
    } else {
      function_return_.Bind();
      GenerateReturnSequence();
    }
  } else if (function_return_.is_linked()) {
    // If the return target has dangling jumps to it, then we have not
    // yet generated the return sequence.  This can happen when (a)
    // control does not flow off the end of the body so we did not
    // compile an artificial return statement just above, and (b) there
    // are return statements in the body but (c) they are all shadowed.
    function_return_.Bind();
    GenerateReturnSequence();
  }

  // Adjust for function-level loop nesting.
  ASSERT(loop_nesting_ == (info->is_in_loop() ? 1 : 0));
  loop_nesting_ = 0;

  // Code generation state must be reset.
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
  ASSERT(loop_nesting() == 0);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    ProcessDeferred();
  }

  allocator_ = NULL;
  type_info_ = NULL;
}


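// Maps a parameter or stack-local slot to a flat index (parameters first,
// then locals); any other slot kind yields kInvalidSlotNumber.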
int CodeGenerator::NumberOfSlot(Slot* slot) {
  if (slot == NULL) return kInvalidSlotNumber;
  switch (slot->type()) {
    case Slot::PARAMETER:
      return slot->index();
    case Slot::LOCAL:
      return slot->index() + scope()->num_parameters();
    default:
      break;
  }
  return kInvalidSlotNumber;
}


MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(cp));  // do not overwrite context register
      Register context = cp;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this mov may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we are
      // always at a function context. However it is safe to dereference be-
      // cause the function context of a function context is itself. Before
      // deleting this mov we should try to create a counter-example first,
      // though...)
      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return MemOperand(r0, 0);
  }
}


MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Register tmp,
    Register tmp2,
    JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = cp;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
  __ tst(tmp2, tmp2);
  slow->Branch(ne);
  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp, slot->index());
}


// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
// condition code register and no value is pushed. If the condition code
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
                                  JumpTarget* true_target,
                                  JumpTarget* false_target,
                                  bool force_cc) {
  ASSERT(!has_cc());
  int original_height = frame_->height();

  { ConditionCodeGenState new_state(this, true_target, false_target);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression.  In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (eg, a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        has_valid_frame() &&
        !has_cc() &&
        frame_->height() == original_height) {
      true_target->Jump();
    }
  }
  if (force_cc && frame_ != NULL && !has_cc()) {
    // Convert the TOS value to a boolean in the condition code register.
    ToBoolean(true_target, false_target);
  }
  ASSERT(!force_cc || !has_valid_frame() || has_cc());
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::Load(Expression* expr) {
  // We generally assume that we are not in a spilled scope for most
  // of the code generator.  A failure to ensure this caused issue 815
  // and this assert is designed to catch similar issues.
  frame_->AssertIsNotSpilled();
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  JumpTarget true_target;
  JumpTarget false_target;
  LoadCondition(expr, &true_target, &false_target, false);

  if (has_cc()) {
    // Convert cc_reg_ into a boolean value.
    JumpTarget loaded;
    JumpTarget materialize_true;
    materialize_true.Branch(cc_reg_);
    frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
    loaded.Jump();
    materialize_true.Bind();
    frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
    loaded.Bind();
    cc_reg_ = al;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    // We have at least one condition value that has been "translated"
    // into a branch, thus it needs to be loaded explicitly.
    JumpTarget loaded;
    if (frame_ != NULL) {
      loaded.Jump();  // Don't lose the current TOS.
    }
    bool both = true_target.is_linked() && false_target.is_linked();
    // Load "true" if necessary.
    if (true_target.is_linked()) {
      true_target.Bind();
      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
    }
    // If both "true" and "false" need to be loaded jump across the code for
    // "false".
    if (both) {
      loaded.Jump();
    }
    // Load "false" if necessary.
    if (false_target.is_linked()) {
      false_target.Bind();
      frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
    }
    // A value is loaded on all paths reaching this point.
    loaded.Bind();
  }
  ASSERT(has_valid_frame());
  ASSERT(!has_cc());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::LoadGlobal() {
  Register reg = frame_->GetTOSRegister();
  __ ldr(reg, GlobalObject());
  frame_->EmitPush(reg);
}


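// Loads the global receiver object from the current context and pushes it
// on the virtual frame.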
void CodeGenerator::LoadGlobalReceiver(Register scratch) {
  Register reg = frame_->GetTOSRegister();
  __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(reg,
         FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
  frame_->EmitPush(reg);
}


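// Determines how the arguments object should be allocated: not at all when
// the function never references 'arguments', eagerly when the function has a
// heap-allocated context, and lazily otherwise.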
ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
  ASSERT(scope()->arguments_shadow() != NULL);
  // We don't want to do lazy arguments allocation for functions that
  // have heap-allocated contexts, because it interferes with the
  // uninitialized const tracking in the context objects.
  return (scope()->num_heap_slots() > 0)
      ? EAGER_ARGUMENTS_ALLOCATION
      : LAZY_ARGUMENTS_ALLOCATION;
}


void CodeGenerator::StoreArgumentsObject(bool initial) {
  ArgumentsAllocationMode mode = ArgumentsMode();
  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);

  Comment cmnt(masm_, "[ store arguments object");
  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
    // When using lazy arguments allocation, we store the hole value
    // as a sentinel indicating that the arguments object hasn't been
    // allocated yet.
    frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
  } else {
    frame_->SpillAll();
    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
    __ ldr(r2, frame_->Function());
    // The receiver is below the arguments, the return address, and the
    // frame pointer on the stack.
    const int kReceiverDisplacement = 2 + scope()->num_parameters();
    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
    frame_->Adjust(3);
    __ Push(r2, r1, r0);
    frame_->CallStub(&stub, 3);
    frame_->EmitPush(r0);
  }

  Variable* arguments = scope()->arguments();
  Variable* shadow = scope()->arguments_shadow();
  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
  ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
  JumpTarget done;
  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
    // We have to skip storing into the arguments slot if it has
    // already been written to. This can happen if a function
    // has a local variable named 'arguments'.
    LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
    Register arguments = frame_->PopToRegister();
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(arguments, ip);
    done.Branch(ne);
  }
  StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
  StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
}


void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValue();
  } else if (variable != NULL && variable->AsSlot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
  } else {
    // Anything else can be handled normally.
    Load(expr);
  }
}


Reference::Reference(CodeGenerator* cgen,
                     Expression* expression,
                     bool persist_after_get)
    : cgen_(cgen),
      expression_(expression),
      type_(ILLEGAL),
      persist_after_get_(persist_after_get) {
  // We generally assume that we are not in a spilled scope for most
  // of the code generator.  A failure to ensure this caused issue 815
  // and this assert is designed to catch similar issues.
  cgen->frame()->AssertIsNotSpilled();
  cgen->LoadReference(this);
}


Reference::~Reference() {
  ASSERT(is_unloaded() || is_illegal());
}


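// Classifies ref->expression() as a NAMED, KEYED, or SLOT reference and
// loads the components the reference needs (the receiver object and, for
// keyed references, the key) onto the virtual frame.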
void CodeGenerator::LoadReference(Reference* ref) {
  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    Load(property->obj());
    if (property->key()->IsPropertyName()) {
      ref->set_type(Reference::NAMED);
    } else {
      Load(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property.  Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->AsSlot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    Load(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }
}


void CodeGenerator::UnloadReference(Reference* ref) {
  int size = ref->size();
  ref->set_unloaded();
  if (size == 0) return;

  // Pop a reference from the stack while preserving TOS.
  VirtualFrame::RegisterAllocationScope scope(this);
  Comment cmnt(masm_, "[ UnloadReference");
  if (size > 0) {
    Register tos = frame_->PopToRegister();
    frame_->Drop(size);
    frame_->EmitPush(tos);
  }
}


// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  // Note: The generated code snippet does not change stack variables.
  //       Only the condition code should be set.
  bool known_smi = frame_->KnownSmiAt(0);
  Register tos = frame_->PopToRegister();

  // Fast case checks

  // Check if the value is 'false'.
  if (!known_smi) {
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(tos, ip);
    false_target->Branch(eq);

    // Check if the value is 'true'.
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(tos, ip);
    true_target->Branch(eq);

    // Check if the value is 'undefined'.
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(tos, ip);
    false_target->Branch(eq);
  }

  // Check if the value is a smi.
  __ cmp(tos, Operand(Smi::FromInt(0)));

  if (!known_smi) {
    false_target->Branch(eq);
    __ tst(tos, Operand(kSmiTagMask));
    true_target->Branch(eq);

    // Slow case.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      // Implements the slow case by using ToBooleanStub.
      // The ToBooleanStub takes a single argument, and
      // returns a non-zero value for true, or zero for false.
      // Both the argument value and the return value use the
      // register assigned to tos_
      ToBooleanStub stub(tos);
      frame_->CallStub(&stub, 0);
      // Convert the result in "tos" to a condition code.
      __ cmp(tos, Operand(0, RelocInfo::NONE));
    } else {
      // Implements slow case by calling the runtime.
      frame_->EmitPush(tos);
      frame_->CallRuntime(Runtime::kToBool, 1);
      // Convert the result (r0) to a condition code.
      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
      __ cmp(r0, ip);
    }
  }

  cc_reg_ = ne;
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           OverwriteMode overwrite_mode,
                                           GenerateInlineSmi inline_smi,
                                           int constant_rhs) {
  // top of virtual frame: y
  // 2nd elt. on virtual frame : x
  // result : top of virtual frame

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:
    case Token::SUB:
      if (inline_smi) {
        JumpTarget done;
        Register rhs = frame_->PopToRegister();
        Register lhs = frame_->PopToRegister(rhs);
        Register scratch = VirtualFrame::scratch0();
        __ orr(scratch, rhs, Operand(lhs));
        // Check they are both small and positive.
        __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
        STATIC_ASSERT(kSmiTag == 0);
        if (op == Token::ADD) {
          __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
        } else {
          __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
        }
        done.Branch(eq);
        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
        frame_->SpillAll();
        frame_->CallStub(&stub, 0);
        done.Bind();
        frame_->EmitPush(r0);
        break;
      } else {
        // Fall through!
      }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
      if (inline_smi) {
        bool rhs_is_smi = frame_->KnownSmiAt(0);
        bool lhs_is_smi = frame_->KnownSmiAt(1);
        Register rhs = frame_->PopToRegister();
        Register lhs = frame_->PopToRegister(rhs);
        Register smi_test_reg;
        Condition cond;
        if (!rhs_is_smi || !lhs_is_smi) {
          if (rhs_is_smi) {
            smi_test_reg = lhs;
          } else if (lhs_is_smi) {
            smi_test_reg = rhs;
          } else {
            smi_test_reg = VirtualFrame::scratch0();
            __ orr(smi_test_reg, rhs, Operand(lhs));
          }
          // Check they are both Smis.
          __ tst(smi_test_reg, Operand(kSmiTagMask));
          cond = eq;
        } else {
          cond = al;
        }
        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
        if (op == Token::BIT_OR) {
          __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
        } else if (op == Token::BIT_AND) {
          __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
        } else {
          ASSERT(op == Token::BIT_XOR);
          STATIC_ASSERT(kSmiTag == 0);
          __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
        }
        if (cond != al) {
          JumpTarget done;
          done.Branch(cond);
          GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
          frame_->SpillAll();
          frame_->CallStub(&stub, 0);
          done.Bind();
        }
        frame_->EmitPush(r0);
        break;
      } else {
        // Fall through!
      }
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      Register rhs = frame_->PopToRegister();
      Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
      GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
      frame_->SpillAll();
      frame_->CallStub(&stub, 0);
      frame_->EmitPush(r0);
      break;
    }

    case Token::COMMA: {
      Register scratch = frame_->PopToRegister();
      // Simply discard left value.
      frame_->Drop();
      frame_->EmitPush(scratch);
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}


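// Deferred (out-of-line) code for an inlined binary operation whose other
// operand is a known smi constant.  The inlined fast path branches here when
// its smi assumptions fail; this code rebuilds both operands and falls back
// to GenericBinaryOpStub, with extra paths for bit operations whose input or
// result is not a smi.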
class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             int value,
                             bool reversed,
                             OverwriteMode overwrite_mode,
                             Register tos)
      : op_(op),
        value_(value),
        reversed_(reversed),
        overwrite_mode_(overwrite_mode),
        tos_register_(tos) {
    set_comment("[ DeferredInlinedSmiOperation");
  }

  virtual void Generate();
  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
  // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty
  // methods, so it is the responsibility of the deferred code to save and
  // restore registers.
  virtual bool AutoSaveAndRestore() { return false; }

  void JumpToNonSmiInput(Condition cond);
  void JumpToAnswerOutOfRange(Condition cond);

 private:
  void GenerateNonSmiInput();
  void GenerateAnswerOutOfRange();
  void WriteNonSmiAnswer(Register answer,
                         Register heap_number,
                         Register scratch);

  Token::Value op_;
  int value_;
  bool reversed_;
  OverwriteMode overwrite_mode_;
  Register tos_register_;
  Label non_smi_input_;
  Label answer_out_of_range_;
};


// For bit operations we try harder and handle the case where the input is not
// a Smi but a 32bits integer without calling the generic stub.
void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
  ASSERT(Token::IsBitOp(op_));

  __ b(cond, &non_smi_input_);
}


// For bit operations the result is always 32bits so we handle the case where
// the result does not fit in a Smi without calling the generic stub.
void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
  ASSERT(Token::IsBitOp(op_));

  if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
    // >>> requires an unsigned to double conversion and the non VFP code
    // does not support this conversion.
    __ b(cond, entry_label());
  } else {
    __ b(cond, &answer_out_of_range_);
  }
}


// On entry the non-constant side of the binary operation is in tos_register_
// and the constant smi side is nowhere.  The tos_register_ is not used by the
// virtual frame.  On exit the answer is in the tos_register_ and the virtual
// frame is unchanged.
void DeferredInlineSmiOperation::Generate() {
  VirtualFrame copied_frame(*frame_state()->frame());
  copied_frame.SpillAll();

  Register lhs = r1;
  Register rhs = r0;
  switch (op_) {
    case Token::ADD: {
      // Revert optimistic add.
      if (reversed_) {
        __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SUB: {
      // Revert optimistic sub.
      if (reversed_) {
        __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    // For these operations there is no optimistic operation that needs to be
    // reverted.
    case Token::MUL:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (tos_register_.is(r1)) {
        __ mov(r0, Operand(Smi::FromInt(value_)));
      } else {
        ASSERT(tos_register_.is(r0));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      }
      if (reversed_ == tos_register_.is(r1)) {
          lhs = r0;
          rhs = r1;
      }
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }

  GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
  __ CallStub(&stub);

  // The generic stub returns its value in r0, but that's not
  // necessarily what we want.  We want whatever the inlined code
  // expected, which is that the answer is in the same register as
  // the operand was.
  __ Move(tos_register_, r0);

  // The tos register was not in use for the virtual frame that we
  // came into this function with, so we can merge back to that frame
  // without trashing it.
  copied_frame.MergeTo(frame_state()->frame());

  Exit();

  if (non_smi_input_.is_linked()) {
    GenerateNonSmiInput();
  }

  if (answer_out_of_range_.is_linked()) {
    GenerateAnswerOutOfRange();
  }
}


// Convert and write the integer answer into heap_number.
void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
                                                   Register heap_number,
                                                   Register scratch) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(s0, answer);
    if (op_ == Token::SHR) {
      __ vcvt_f64_u32(d0, s0);
    } else {
      __ vcvt_f64_s32(d0, s0);
    }
    __ sub(scratch, heap_number, Operand(kHeapObjectTag));
    __ vstr(d0, scratch, HeapNumber::kValueOffset);
  } else {
    WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
    __ CallStub(&stub);
  }
}


void DeferredInlineSmiOperation::GenerateNonSmiInput() {
  // We know the left hand side is not a Smi and the right hand side is an
  // immediate value (value_) which can be represented as a Smi. We only
  // handle bit operations.
  ASSERT(Token::IsBitOp(op_));

  if (FLAG_debug_code) {
    __ Abort("Should not fall through!");
  }

  __ bind(&non_smi_input_);
  if (FLAG_debug_code) {
    __ AbortIfSmi(tos_register_);
  }

  // This routine uses the registers from r2 to r6.  At the moment they are
  // not used by the register allocator, but when they are it should use
  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.

  Register heap_number_map = r7;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
  __ cmp(r3, heap_number_map);
  // Not a number, fall back to the GenericBinaryOpStub.
  __ b(ne, entry_label());

  Register int32 = r2;
  // Not a 32bits signed int, fall back to the GenericBinaryOpStub.
  __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());

  // tos_register_ (r0 or r1): Original heap number.
  // int32: signed 32bits int.

  Label result_not_a_smi;
  int shift_value = value_ & 0x1f;
  switch (op_) {
    case Token::BIT_OR:  __ orr(int32, int32, Operand(value_)); break;
    case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
    case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
    case Token::SAR:
      ASSERT(!reversed_);
      if (shift_value != 0) {
         __ mov(int32, Operand(int32, ASR, shift_value));
      }
      break;
    case Token::SHR:
      ASSERT(!reversed_);
      if (shift_value != 0) {
        __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
      } else {
        // SHR is special because it is required to produce a positive answer.
        __ cmp(int32, Operand(0, RelocInfo::NONE));
      }
      if (CpuFeatures::IsSupported(VFP3)) {
        __ b(mi, &result_not_a_smi);
      } else {
        // Non VFP code cannot convert from unsigned to double, so fall back
        // to GenericBinaryOpStub.
        __ b(mi, entry_label());
      }
      break;
    case Token::SHL:
      ASSERT(!reversed_);
      if (shift_value != 0) {
        __ mov(int32, Operand(int32, LSL, shift_value));
      }
      break;
    default: UNREACHABLE();
  }
  // Check that the *signed* result fits in a smi. Not necessary for AND, SAR
  // if the shift is more than 0 or SHR if the shift is more than 1.
  if (!( (op_ == Token::AND) ||
        ((op_ == Token::SAR) && (shift_value > 0)) ||
        ((op_ == Token::SHR) && (shift_value > 1)))) {
    __ add(r3, int32, Operand(0x40000000), SetCC);
    __ b(mi, &result_not_a_smi);
  }
  __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
  Exit();

  if (result_not_a_smi.is_linked()) {
    __ bind(&result_not_a_smi);
    if (overwrite_mode_ != OVERWRITE_LEFT) {
      ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
             (overwrite_mode_ == OVERWRITE_RIGHT));
      // If the allocation fails, fall back to the GenericBinaryOpStub.
      __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
      // Nothing can go wrong now, so overwrite tos.
      __ mov(tos_register_, Operand(r4));
    }

    // int32: answer as signed 32bits integer.
    // tos_register_: Heap number to write the answer into.
    WriteNonSmiAnswer(int32, tos_register_, r3);

    Exit();
  }
}


void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
  // The inputs to a bitwise operation were Smis but the result cannot fit
  // into a Smi, so we store it into a heap number. VirtualFrame::scratch0()
  // holds the untagged result to be converted.  tos_register_ contains the
  // input.  See the calls to JumpToAnswerOutOfRange to see how we got here.
  ASSERT(Token::IsBitOp(op_));
  ASSERT(!reversed_);

  Register untagged_result = VirtualFrame::scratch0();

  if (FLAG_debug_code) {
    __ Abort("Should not fall through!");
  }

  __ bind(&answer_out_of_range_);
  if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
    // >>> 0 is a special case where the untagged_result register is not set up
    // yet.  We untag the input to get it.
    __ mov(untagged_result, Operand(tos_register_, ASR, kSmiTagSize));
  }

  // This routine uses the registers from r2 to r6.  At the moment they are
  // not used by the register allocator, but when they are it should use
  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.

  // Allocate the result heap number.
  Register heap_number_map = VirtualFrame::scratch1();
  Register heap_number = r4;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // If the allocation fails, fall back to the GenericBinaryOpStub.
  __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
  WriteNonSmiAnswer(untagged_result, heap_number, r3);
  __ mov(tos_register_, Operand(heap_number));

  Exit();
}


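// Returns true if x has at most two bits set.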
static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}


// Returns the index of the lowest bit set.
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}


// Can we multiply by x with max two shifts and an add.
// This answers yes to all integers from 2 to 10.
static bool IsEasyToMultiplyBy(int x) {
  if (x < 2) return false;                          // Avoid special cases.
  if (x > (Smi::kMaxValue + 1) >> 2) return false;  // Almost always overflows.
  if (IsPowerOf2(x)) return true;                   // Simple shift.
  if (PopCountLessThanEqual2(x)) return true;       // Shift and add and shift.
  if (IsPowerOf2(x + 1)) return true;               // Patterns like 11111.
  return false;
}


// Can multiply by anything that IsEasyToMultiplyBy returns true for.
// Source and destination may be the same register.  This routine does
// not set carry and overflow the way a mul instruction would.
static void InlineMultiplyByKnownInt(MacroAssembler* masm,
                                     Register source,
                                     Register destination,
                                     int known_int) {
  if (IsPowerOf2(known_int)) {
    masm->mov(destination, Operand(source, LSL, BitPosition(known_int)));
  } else if (PopCountLessThanEqual2(known_int)) {
    int first_bit = BitPosition(known_int);
    int second_bit = BitPosition(known_int ^ (1 << first_bit));
    masm->add(destination, source,
              Operand(source, LSL, second_bit - first_bit));
    if (first_bit != 0) {
      masm->mov(destination, Operand(destination, LSL, first_bit));
    }
  } else {
    ASSERT(IsPowerOf2(known_int + 1));  // Patterns like 1111.
    int the_bit = BitPosition(known_int + 1);
    masm->rsb(destination, source, Operand(source, LSL, the_bit));
  }
}


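// Emits code for a binary operation where one operand is the constant smi
// 'value' ('reversed' means the constant is the left operand).  Cases that
// are cheap to inline get a fast path with a DeferredInlineSmiOperation
// fallback; everything else is delegated to GenericBinaryOperation.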
void CodeGenerator::SmiOperation(Token::Value op,
                                 Handle<Object> value,
                                 bool reversed,
                                 OverwriteMode mode) {
  int int_value = Smi::cast(*value)->value();

  bool both_sides_are_smi = frame_->KnownSmiAt(0);

  bool something_to_inline;
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::BIT_AND:
    case Token::BIT_OR:
    case Token::BIT_XOR: {
      something_to_inline = true;
      break;
    }
    case Token::SHL: {
      something_to_inline = (both_sides_are_smi || !reversed);
      break;
    }
    case Token::SHR:
    case Token::SAR: {
      if (reversed) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MOD: {
      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MUL: {
      if (!IsEasyToMultiplyBy(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    default: {
      something_to_inline = false;
      break;
    }
  }

  if (!something_to_inline) {
    if (!reversed) {
      // Push the rhs onto the virtual frame by putting it in a TOS register.
      Register rhs = frame_->GetTOSRegister();
      __ mov(rhs, Operand(value));
      frame_->EmitPush(rhs, TypeInfo::Smi());
      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
    } else {
      // Pop the rhs, then push lhs and rhs in the right order.  Only performs
      // at most one pop, the rest takes place in TOS registers.
      Register lhs = frame_->GetTOSRegister();    // Get reg for pushing.
      Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
      __ mov(lhs, Operand(value));
      frame_->EmitPush(lhs, TypeInfo::Smi());
      TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
      frame_->EmitPush(rhs, t);
      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI,
                             GenericBinaryOpStub::kUnknownIntValue);
    }
    return;
  }

  // We move the top of stack to a register (normally no move is involved).
  Register tos = frame_->PopToRegister();
  switch (op) {
    case Token::ADD: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      __ add(tos, tos, Operand(value), SetCC);
      deferred->Branch(vs);
      if (!both_sides_are_smi) {
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      if (reversed) {
        __ rsb(tos, tos, Operand(value), SetCC);
      } else {
        __ sub(tos, tos, Operand(value), SetCC);
      }
      deferred->Branch(vs);
      if (!both_sides_are_smi) {
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }


    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (both_sides_are_smi) {
        switch (op) {
          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
          default: UNREACHABLE();
        }
        frame_->EmitPush(tos, TypeInfo::Smi());
      } else {
        DeferredInlineSmiOperation* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
        __ tst(tos, Operand(kSmiTagMask));
        deferred->JumpToNonSmiInput(ne);
        switch (op) {
          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
          default: UNREACHABLE();
        }
        deferred->BindExit();
        TypeInfo result_type =
            (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
        frame_->EmitPush(tos, result_type);
      }
      break;
    }

    case Token::SHL:
      if (reversed) {
        ASSERT(both_sides_are_smi);
        int max_shift = 0;
        int max_result = int_value == 0 ? 1 : int_value;
        while (Smi::IsValid(max_result << 1)) {
          max_shift++;
          max_result <<= 1;
        }
        DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
        // Mask off the last 5 bits of the shift operand (rhs).  This is part
        // of the definition of shift in JS and we know we have a Smi so we
        // can safely do this.  The masked version gets passed to the
        // deferred code, but that makes no difference.
        __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
        __ cmp(tos, Operand(Smi::FromInt(max_shift)));
        deferred->Branch(ge);
        Register scratch = VirtualFrame::scratch0();
        __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Untag.
        __ mov(tos, Operand(Smi::FromInt(int_value)));    // Load constant.
        __ mov(tos, Operand(tos, LSL, scratch));          // Shift constant.
        deferred->BindExit();
        TypeInfo result = TypeInfo::Integer32();
        frame_->EmitPush(tos, result);
        break;
      }
      // Fall through!
    case Token::SHR:
    case Token::SAR: {
      ASSERT(!reversed);
      int shift_value = int_value & 0x1f;
      TypeInfo result = TypeInfo::Number();

      if (op == Token::SHR) {
        if (shift_value > 1) {
          result = TypeInfo::Smi();
        } else if (shift_value > 0) {
          result = TypeInfo::Integer32();
        }
      } else if (op == Token::SAR) {
        if (shift_value > 0) {
          result = TypeInfo::Smi();
        } else {
          result = TypeInfo::Integer32();
        }
      } else {
        ASSERT(op == Token::SHL);
        result = TypeInfo::Integer32();
      }

      DeferredInlineSmiOperation* deferred =
        new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
      if (!both_sides_are_smi) {
        __ tst(tos, Operand(kSmiTagMask));
        deferred->JumpToNonSmiInput(ne);
      }
      switch (op) {
        case Token::SHL: {
          if (shift_value != 0) {
            Register untagged_result = VirtualFrame::scratch0();
            Register scratch = VirtualFrame::scratch1();
            int adjusted_shift = shift_value - kSmiTagSize;
            ASSERT(adjusted_shift >= 0);

            if (adjusted_shift != 0) {
              __ mov(untagged_result, Operand(tos, LSL, adjusted_shift));
            } else {
              __ mov(untagged_result, Operand(tos));
            }
            // Check that the *signed* result fits in a smi.
            __ add(scratch, untagged_result, Operand(0x40000000), SetCC);
            deferred->JumpToAnswerOutOfRange(mi);
            __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
          }
          break;
        }
        case Token::SHR: {
          if (shift_value != 0) {
1499 1500 1501 1502
            Register untagged_result = VirtualFrame::scratch0();
            // Remove tag.
            __ mov(untagged_result, Operand(tos, ASR, kSmiTagSize));
            __ mov(untagged_result, Operand(untagged_result, LSR, shift_value));
1503
            if (shift_value == 1) {
1504 1505
              // Check that the *unsigned* result fits in a smi.
              // Neither of the two high-order bits can be set:
1506
              // - 0x80000000: high bit would be lost when smi tagging
1507 1508 1509 1510
              // - 0x40000000: this number would convert to negative when Smi
              //   tagging.
              // These two cases can only happen with shifts by 0 or 1 when
              // handed a valid smi.
1511
              __ tst(untagged_result, Operand(0xc0000000));
1512
              deferred->JumpToAnswerOutOfRange(ne);
1513
            }
1514
            __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
1515
          } else {
1516
            __ cmp(tos, Operand(0, RelocInfo::NONE));
1517
            deferred->JumpToAnswerOutOfRange(mi);
1518 1519 1520 1521 1522
          }
          break;
        }
        case Token::SAR: {
          if (shift_value != 0) {
1523
            // Do the shift and the tag removal in one operation. If the shift
1524
            // is 31 bits (the highest possible value) then we emit the
1525 1526
            // instruction as a shift by 0 which in the ARM ISA means shift
            // arithmetically by 32.
1527 1528
            __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
1529 1530 1531 1532 1533 1534
          }
          break;
        }
        default: UNREACHABLE();
      }
      deferred->BindExit();
1535
      frame_->EmitPush(tos, result);
1536 1537 1538 1539 1540 1541 1542 1543
      break;
    }

    case Token::MOD: {
      ASSERT(!reversed);
      ASSERT(int_value >= 2);
      ASSERT(IsPowerOf2(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned mask = (0x80000000u | kSmiTagMask);
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
      mask = (int_value << kSmiTagSize) - 1;
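      // For example, with int_value == 8 and kSmiTagSize == 1 the mask is
      // 0xf, which keeps the three low payload bits (the value mod 8) plus
      // the zero tag bit, so the result is already a correctly tagged Smi.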
      __ and_(tos, tos, Operand(mask));
      deferred->BindExit();
      // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
      frame_->EmitPush(
          tos,
          both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
      break;
    }

    case Token::MUL: {
      ASSERT(IsEasyToMultiplyBy(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
      max_smi_that_wont_overflow <<= kSmiTagSize;
      unsigned mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      mask |= kSmiTagMask;
      // This does a single mask that checks for a too high value in a
      // conservative way and for a non-Smi.  It also filters out negative
      // numbers, unfortunately, but since this code is inline we prefer
      // brevity to comprehensiveness.
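      // For example, with int_value == 3 the largest safe payload is
      // Smi::kMaxValue / 3 (tagged by the shift above); the loop extends the
      // mask down from bit 31 until it reaches that limit's highest bit, so
      // any operand with a bit set at or above it, or with the tag bit set,
      // takes the deferred path.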
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);
      InlineMultiplyByKnownInt(masm_, tos, tos, int_value);
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    default:
      UNREACHABLE();
      break;
  }
}


void CodeGenerator::Comparison(Condition cc,
                               Expression* left,
                               Expression* right,
                               bool strict) {
  VirtualFrame::RegisterAllocationScope scope(this);

  if (left != NULL) Load(left);
  if (right != NULL) Load(right);

  // sp[0] : y
  // sp[1] : x
  // result : cc register

  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == eq);

  Register lhs;
  Register rhs;

  bool lhs_is_smi;
  bool rhs_is_smi;

  // We load the top two stack positions into registers chosen by the virtual
  // frame.  This should keep the register shuffling to a minimum.
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
  if (cc == gt || cc == le) {
    cc = ReverseCondition(cc);
    lhs_is_smi = frame_->KnownSmiAt(0);
    rhs_is_smi = frame_->KnownSmiAt(1);
    lhs = frame_->PopToRegister();
    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
  } else {
    rhs_is_smi = frame_->KnownSmiAt(0);
    lhs_is_smi = frame_->KnownSmiAt(1);
    rhs = frame_->PopToRegister();
    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
  }

  bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);

  ASSERT(rhs.is(r0) || rhs.is(r1));
  ASSERT(lhs.is(r0) || lhs.is(r1));

  JumpTarget exit;

  if (!both_sides_are_smi) {
    // Now we have the two sides in r0 and r1.  We flush any other registers
    // because the stub doesn't know about register allocation.
    frame_->SpillAll();
    Register scratch = VirtualFrame::scratch0();
    Register smi_test_reg;
    if (lhs_is_smi) {
      smi_test_reg = rhs;
    } else if (rhs_is_smi) {
      smi_test_reg = lhs;
    } else {
      __ orr(scratch, lhs, Operand(rhs));
      smi_test_reg = scratch;
    }
    __ tst(smi_test_reg, Operand(kSmiTagMask));
    JumpTarget smi;
    smi.Branch(eq);

    // Perform non-smi comparison by stub.
    // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
    // We call with 0 args because there are 0 on the stack.
    CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
    frame_->CallStub(&stub, 0);
    __ cmp(r0, Operand(0, RelocInfo::NONE));
    exit.Jump();

    smi.Bind();
  }

  // Do smi comparisons by pointer comparison.
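  // This works because a smi is just its value shifted up by the (zero) tag
  // bit, so comparing the raw words orders smis exactly like the integers
  // they represent.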
  __ cmp(lhs, Operand(rhs));

  exit.Bind();
  cc_reg_ = cc;
}


// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                      CallFunctionFlags flags,
                                      int position) {
  // Push the arguments ("left-to-right") on the stack.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  // Record the position for debugging purposes.
  CodeForSourcePosition(position);

  // Use the shared code stub to call the function.
  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
  CallFunctionStub call_function(arg_count, in_loop, flags);
  frame_->CallStub(&call_function, arg_count + 1);

  // Restore context and pop function from the stack.
  __ ldr(cp, frame_->Context());
  frame_->Drop();  // discard the TOS
}


void CodeGenerator::CallApplyLazy(Expression* applicand,
                                  Expression* receiver,
                                  VariableProxy* arguments,
                                  int position) {
  // An optimized implementation of expressions of the form
  // x.apply(y, arguments).
  // If the arguments object of the scope has not been allocated,
  // and x.apply is Function.prototype.apply, this optimization
  // just copies y and the arguments of the current function on the
  // stack, as receiver and arguments, and calls x.
  // In the implementation comments, we call x the applicand
  // and y the receiver.

  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
  ASSERT(arguments->IsArguments());

  // Load applicand.apply onto the stack. This will usually
  // give us a megamorphic load site. Not super, but it works.
  Load(applicand);
  Handle<String> name = Factory::LookupAsciiSymbol("apply");
  frame_->Dup();
  frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
  frame_->EmitPush(r0);

  // Load the receiver and the existing arguments object onto the
  // expression stack. Avoid allocating the arguments object here.
  Load(receiver);
  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);

  // At this point the top two stack elements are probably in registers
  // since they were just loaded.  Ensure they are in regs and get the
  // regs.
  Register receiver_reg = frame_->Peek2();
  Register arguments_reg = frame_->Peek();

  // From now on the frame is spilled.
  frame_->SpillAll();

  // Emit the source position information after having loaded the
  // receiver and the arguments.
  CodeForSourcePosition(position);
  // Contents of the stack at this point:
  //   sp[0]: arguments object of the current function or the hole.
  //   sp[1]: receiver
  //   sp[2]: applicand.apply
  //   sp[3]: applicand.

  // Check if the arguments object has been lazily allocated
  // already. If so, just use that instead of copying the arguments
  // from the stack. This also deals with cases where a local variable
  // named 'arguments' has been introduced.
  JumpTarget slow;
  Label done;
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(ip, arguments_reg);
  slow.Branch(ne);

  Label build_args;
  // Get rid of the arguments object probe.
  frame_->Drop();
  // Stack now has 3 elements on it.
  // Contents of stack at this point:
  //   sp[0]: receiver - in the receiver_reg register.
  //   sp[1]: applicand.apply
  //   sp[2]: applicand.

  // Check that the receiver really is a JavaScript object.
  __ BranchOnSmi(receiver_reg, &build_args);
  // We allow all JSObjects including JSFunctions.  As long as
  // JS_FUNCTION_TYPE is the last instance type and it is right
  // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
  // bound.
  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
  __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &build_args);

  // Check that applicand.apply is Function.prototype.apply.
  __ ldr(r0, MemOperand(sp, kPointerSize));
  __ BranchOnSmi(r0, &build_args);
  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
  __ b(ne, &build_args);
  Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
  __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
  __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ cmp(r1, Operand(apply_code));
  __ b(ne, &build_args);

  // Check that applicand is a function.
  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
  __ BranchOnSmi(r1, &build_args);
  __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
  __ b(ne, &build_args);

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  Label invoke, adapted;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adapted);

  // No arguments adaptor frame. Copy fixed number of arguments.
  __ mov(r0, Operand(scope()->num_parameters()));
  for (int i = 0; i < scope()->num_parameters(); i++) {
    __ ldr(r2, frame_->ParameterAt(i));
    __ push(r2);
  }
  __ jmp(&invoke);

  // Arguments adaptor frame present. Copy arguments from there, but
  // avoid copying too many arguments to avoid stack overflows.
  __ bind(&adapted);
  static const uint32_t kArgumentsLimit = 1 * KB;
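  // kArgumentsLimit caps how many arguments are copied inline; the length
  // loaded below is a smi and is untagged before the comparison.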
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ mov(r0, Operand(r0, LSR, kSmiTagSize));
  __ mov(r3, r0);
  __ cmp(r0, Operand(kArgumentsLimit));
  __ b(gt, &build_args);

  // Loop through the arguments pushing them onto the execution
  // stack. We don't inform the virtual frame of the push, so we don't
  // have to worry about getting rid of the elements from the virtual
  // frame.
  Label loop;
  // r3 is a small non-negative integer, due to the test above.
  __ cmp(r3, Operand(0, RelocInfo::NONE));
  __ b(eq, &invoke);
  // Compute the address of the first argument.
  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
  __ add(r2, r2, Operand(kPointerSize));
  __ bind(&loop);
  // Post-decrement argument address by kPointerSize on each iteration.
  __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
  __ push(r4);
  __ sub(r3, r3, Operand(1), SetCC);
  __ b(gt, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ParameterCount actual(r0);
  __ InvokeFunction(r1, actual, CALL_FUNCTION);
  // Drop applicand.apply and applicand from the stack, and push
  // the result of the function call, but leave the spilled frame
  // unchanged, with 3 elements, so it is correct when we compile the
  // slow-case code.
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ push(r0);
  // Stack now has 1 element:
  //   sp[0]: result
  __ jmp(&done);

  // Slow-case: Allocate the arguments object since we know it isn't
  // there, and fall-through to the slow-case where we call
  // applicand.apply.
  __ bind(&build_args);
  // Stack now has 3 elements, because we have jumped from where:
  //   sp[0]: receiver
  //   sp[1]: applicand.apply
  //   sp[2]: applicand.
  StoreArgumentsObject(false);

  // Stack and frame now have 4 elements.
  slow.Bind();

  // Generic computation of x.apply(y, args) with no special optimization.
  // Flip applicand.apply and applicand on the stack, so
  // applicand looks like the receiver of the applicand.apply call.
  // Then process it as a normal function call.
  __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
  __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));

  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
  frame_->CallStub(&call_function, 3);
  // The function and its two arguments have been dropped.
  frame_->Drop();  // Drop the receiver as well.
  frame_->EmitPush(r0);
  frame_->SpillAll();  // A spilled frame is also jumping to label done.
  // Stack now has 1 element:
  //   sp[0]: result
  __ bind(&done);

  // Restore the context register after a call.
  __ ldr(cp, frame_->Context());
}


void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
  ASSERT(has_cc());
  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
  target->Branch(cc);
  cc_reg_ = al;
}


void CodeGenerator::CheckStack() {
  frame_->SpillAll();
  Comment cmnt(masm_, "[ check stack");
  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
  // Put the lr setup instruction in the delay slot.  kInstrSize is added to
  // the implicit 8 byte offset that always applies to operations with pc and
  // gives a return address 12 bytes down.
  masm_->add(lr, pc, Operand(Assembler::kInstrSize));
  masm_->cmp(sp, Operand(ip));
  StackCheckStub stub;
  // Call the stub if lower.
  masm_->mov(pc,
             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
                     RelocInfo::CODE_TARGET),
             LeaveCC,
             lo);
}


void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
    Visit(statements->at(i));
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitBlock(Block* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->SetExpectedHeight();
  VisitStatements(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  frame_->EmitPush(cp);
  frame_->EmitPush(Operand(pairs));
  frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));

  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
  // The result is discarded.
}


void CodeGenerator::VisitDeclaration(Declaration* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Declaration");
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->AsSlot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->is_dynamic());
    // For now, just do a runtime call.
    frame_->EmitPush(cp);
    frame_->EmitPush(Operand(var->name()));
    // Declaration nodes are always declared in only two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    frame_->EmitPush(Operand(Smi::FromInt(attr)));
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
    } else if (node->fun() != NULL) {
      Load(node->fun());
    } else {
      frame_->EmitPush(Operand(0, RelocInfo::NONE));
    }

    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    // Ignore the return value (declarations are statements).

    ASSERT(frame_->height() == original_height);
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    val = new Literal(Factory::the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    WriteBarrierCharacter wb_info =
        val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
    if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
    // Set initial value.
    Reference target(this, node->proxy());
    Load(val);
    target.SetValue(NOT_CONST_INIT, wb_info);

    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
  }
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ ExpressionStatement");
  CodeForStatementPosition(node);
  Expression* expression = node->expression();
  expression->MarkAsStatement();
  Load(expression);
  frame_->Drop();
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "// EmptyStatement");
  CodeForStatementPosition(node);
  // nothing to do
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitIfStatement(IfStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which parts of the if statement
  // are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();

  CodeForStatementPosition(node);

  JumpTarget exit;
  if (has_then_stm && has_else_stm) {
    Comment cmnt(masm_, "[ IfThenElse");
    JumpTarget then;
    JumpTarget else_;
    // if (cond)
    LoadCondition(node->condition(), &then, &else_, true);
    if (frame_ != NULL) {
      Branch(false, &else_);
    }
    // then
    if (frame_ != NULL || then.is_linked()) {
      then.Bind();
      Visit(node->then_statement());
    }
    if (frame_ != NULL) {
      exit.Jump();
    }
    // else
    if (else_.is_linked()) {
      else_.Bind();
      Visit(node->else_statement());
    }

  } else if (has_then_stm) {
    Comment cmnt(masm_, "[ IfThen");
    ASSERT(!has_else_stm);
    JumpTarget then;
    // if (cond)
    LoadCondition(node->condition(), &then, &exit, true);
    if (frame_ != NULL) {
      Branch(false, &exit);
    }
    // then
    if (frame_ != NULL || then.is_linked()) {
      then.Bind();
      Visit(node->then_statement());
    }

  } else if (has_else_stm) {
    Comment cmnt(masm_, "[ IfElse");
    ASSERT(!has_then_stm);
    JumpTarget else_;
    // if (!cond)
    LoadCondition(node->condition(), &exit, &else_, true);
    if (frame_ != NULL) {
      Branch(true, &exit);
    }
    // else
    if (frame_ != NULL || else_.is_linked()) {
      else_.Bind();
      Visit(node->else_statement());
    }

  } else {
    Comment cmnt(masm_, "[ If");
    ASSERT(!has_then_stm && !has_else_stm);
    // if (cond)
    LoadCondition(node->condition(), &exit, &exit, false);
    if (frame_ != NULL) {
      if (has_cc()) {
        cc_reg_ = al;
      } else {
        frame_->Drop();
      }
    }
  }

  // end
  if (exit.is_linked()) {
    exit.Bind();
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  Comment cmnt(masm_, "[ ContinueStatement");
  CodeForStatementPosition(node);
  node->target()->continue_target()->Jump();
}


void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  Comment cmnt(masm_, "[ BreakStatement");
  CodeForStatementPosition(node);
  node->target()->break_target()->Jump();
}


void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  Comment cmnt(masm_, "[ ReturnStatement");

  CodeForStatementPosition(node);
  Load(node->expression());
  frame_->PopToR0();
  frame_->PrepareForReturn();
  if (function_return_is_shadowed_) {
    function_return_.Jump();
  } else {
    // Pop the result from the frame and prepare the frame for
    // returning thus making it easier to merge.
    if (function_return_.is_bound()) {
      // If the function return label is already bound we reuse the
      // code by jumping to the return site.
      function_return_.Jump();
    } else {
      function_return_.Bind();
      GenerateReturnSequence();
    }
  }
}


void CodeGenerator::GenerateReturnSequence() {
  if (FLAG_trace) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns the parameter as it is.
    frame_->EmitPush(r0);
    frame_->CallRuntime(Runtime::kTraceExit, 1);
  }

#ifdef DEBUG
  // Add a label for checking the size of the code used for returning.
  Label check_exit_codesize;
  masm_->bind(&check_exit_codesize);
#endif
  // Make sure that the constant pool is not emitted inside of the return
  // sequence.
  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    // Tear down the frame which will restore the caller's frame pointer and
    // the link register.
    frame_->Exit();

    // Here we use masm_-> instead of the __ macro to avoid the code coverage
    // tool from instrumenting as we rely on the code size here.
    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
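    // The '+ 1' pops the receiver in addition to the declared parameters.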
    masm_->add(sp, sp, Operand(sp_delta));
    masm_->Jump(lr);
    DeleteFrame();

#ifdef DEBUG
    // Check that the size of the code used for returning matches what is
    // expected by the debugger. If the sp_delta above cannot be encoded in
    // the add instruction the add will generate two instructions.
    int return_sequence_length =
        masm_->InstructionsGeneratedSince(&check_exit_codesize);
    CHECK(return_sequence_length ==
          Assembler::kJSReturnSequenceInstructions ||
          return_sequence_length ==
          Assembler::kJSReturnSequenceInstructions + 1);
#endif
  }
}


void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ WithEnterStatement");
  CodeForStatementPosition(node);
  Load(node->expression());
  if (node->is_catch_block()) {
    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
  } else {
    frame_->CallRuntime(Runtime::kPushContext, 1);
  }
#ifdef DEBUG
  JumpTarget verified_true;
  __ cmp(r0, cp);
  verified_true.Branch(eq);
  __ stop("PushContext: r0 is expected to be the same as cp");
  verified_true.Bind();
#endif
  // Update context local.
  __ str(cp, frame_->Context());
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ WithExitStatement");
  CodeForStatementPosition(node);
  // Pop context.
  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
  // Update context local.
  __ str(cp, frame_->Context());
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ SwitchStatement");
  CodeForStatementPosition(node);
  node->break_target()->SetExpectedHeight();

  Load(node->tag());

  JumpTarget next_test;
  JumpTarget fall_through;
  JumpTarget default_entry;
  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
  ZoneList<CaseClause*>* cases = node->cases();
  int length = cases->length();
  CaseClause* default_clause = NULL;

  for (int i = 0; i < length; i++) {
    CaseClause* clause = cases->at(i);
    if (clause->is_default()) {
      // Remember the default clause and compile it at the end.
      default_clause = clause;
      continue;
    }

    Comment cmnt(masm_, "[ Case clause");
    // Compile the test.
    next_test.Bind();
    next_test.Unuse();
    // Duplicate TOS.
    frame_->Dup();
    Comparison(eq, NULL, clause->label(), true);
    Branch(false, &next_test);

    // Before entering the body from the test, remove the switch value from
    // the stack.
    frame_->Drop();

    // Label the body so that fall through is enabled.
    if (i > 0 && cases->at(i - 1)->is_default()) {
      default_exit.Bind();
    } else {
      fall_through.Bind();
      fall_through.Unuse();
    }
    VisitStatements(clause->statements());

    // If control flow can fall through from the body, jump to the next body
    // or the end of the statement.
    if (frame_ != NULL) {
      if (i < length - 1 && cases->at(i + 1)->is_default()) {
        default_entry.Jump();
      } else {
        fall_through.Jump();
      }
    }
  }

  // The final "test" removes the switch value.
  next_test.Bind();
  frame_->Drop();

  // If there is a default clause, compile it.
  if (default_clause != NULL) {
    Comment cmnt(masm_, "[ Default clause");
    default_entry.Bind();
    VisitStatements(default_clause->statements());
    // If control flow can fall out of the default and there is a case after
    // it, jump to that case's body.
    if (frame_ != NULL && default_exit.is_bound()) {
      default_exit.Jump();
    }
  }

  if (fall_through.is_linked()) {
    fall_through.Bind();
  }

  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ DoWhileStatement");
  CodeForStatementPosition(node);
  node->break_target()->SetExpectedHeight();
  JumpTarget body(JumpTarget::BIDIRECTIONAL);
  IncrementLoopNesting();

  // Label the top of the loop for the backward CFG edge.  If the test
  // is always true we can use the continue target, and if the test is
  // always false there is no need.
  ConditionAnalysis info = AnalyzeCondition(node->cond());
  switch (info) {
    case ALWAYS_TRUE:
      node->continue_target()->SetExpectedHeight();
      node->continue_target()->Bind();
      break;
    case ALWAYS_FALSE:
      node->continue_target()->SetExpectedHeight();
      break;
    case DONT_KNOW:
      node->continue_target()->SetExpectedHeight();
      body.Bind();
      break;
  }

  CheckStack();  // TODO(1222600): ignore if body contains calls.
  Visit(node->body());

  // Compile the test.
  switch (info) {
    case ALWAYS_TRUE:
      // If control can fall off the end of the body, jump back to the
      // top.
      if (has_valid_frame()) {
        node->continue_target()->Jump();
      }
      break;
    case ALWAYS_FALSE:
      // If we have a continue in the body, we only have to bind its
      // jump target.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      break;
    case DONT_KNOW:
      // We have to compile the test expression if it can be reached by
      // control flow falling out of the body or via continue.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      if (has_valid_frame()) {
        Comment cmnt(masm_, "[ DoWhileCondition");
        CodeForDoWhileConditionPosition(node);
        LoadCondition(node->cond(), &body, node->break_target(), true);
        if (has_valid_frame()) {
          // An invalid frame here indicates that control did not
          // fall out of the test expression.
          Branch(true, &body);
        }
      }
      break;
  }

  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  DecrementLoopNesting();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ WhileStatement");
  CodeForStatementPosition(node);

  // If the test is never true and has no side effects there is no need
  // to compile the test or body.
  ConditionAnalysis info = AnalyzeCondition(node->cond());
  if (info == ALWAYS_FALSE) return;

  node->break_target()->SetExpectedHeight();
  IncrementLoopNesting();

  // Label the top of the loop with the continue target for the backward
  // CFG edge.
  node->continue_target()->SetExpectedHeight();
  node->continue_target()->Bind();

  if (info == DONT_KNOW) {
    JumpTarget body(JumpTarget::BIDIRECTIONAL);
    LoadCondition(node->cond(), &body, node->break_target(), true);
    if (has_valid_frame()) {
      // A NULL frame indicates that control did not fall out of the
      // test expression.
      Branch(false, node->break_target());
    }
    if (has_valid_frame() || body.is_linked()) {
      body.Bind();
    }
  }

  if (has_valid_frame()) {
    CheckStack();  // TODO(1222600): ignore if body contains calls.
    Visit(node->body());

    // If control flow can fall out of the body, jump back to the top.
    if (has_valid_frame()) {
      node->continue_target()->Jump();
    }
  }
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  DecrementLoopNesting();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitForStatement(ForStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ ForStatement");
  CodeForStatementPosition(node);
  if (node->init() != NULL) {
    Visit(node->init());
  }

  // If the test is never true there is no need to compile the test or
  // body.
  ConditionAnalysis info = AnalyzeCondition(node->cond());
  if (info == ALWAYS_FALSE) return;

  node->break_target()->SetExpectedHeight();
  IncrementLoopNesting();

  // We know that the loop index is a smi if it is not modified in the
  // loop body and it is checked against a constant limit in the loop
  // condition.  In this case, we reset the static type information of the
  // loop index to smi before compiling the body, the update expression, and
  // the bottom check of the loop condition.
  TypeInfoCodeGenState type_info_scope(this,
                                       node->is_fast_smi_loop() ?
                                       node->loop_variable()->AsSlot() :
                                       NULL,
                                       TypeInfo::Smi());

  // If there is no update statement, label the top of the loop with the
  // continue target, otherwise with the loop target.
  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
  if (node->next() == NULL) {
    node->continue_target()->SetExpectedHeight();
    node->continue_target()->Bind();
  } else {
    node->continue_target()->SetExpectedHeight();
    loop.Bind();
  }

  // If the test is always true, there is no need to compile it.
  if (info == DONT_KNOW) {
    JumpTarget body;
    LoadCondition(node->cond(), &body, node->break_target(), true);
    if (has_valid_frame()) {
      Branch(false, node->break_target());
    }
    if (has_valid_frame() || body.is_linked()) {
      body.Bind();
    }
  }

  if (has_valid_frame()) {
    CheckStack();  // TODO(1222600): ignore if body contains calls.
    Visit(node->body());

    if (node->next() == NULL) {
      // If there is no update statement and control flow can fall out
      // of the loop, jump directly to the continue label.
      if (has_valid_frame()) {
        node->continue_target()->Jump();
      }
    } else {
      // If there is an update statement and control flow can reach it
      // via falling out of the body of the loop or continuing, we
      // compile the update statement.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      if (has_valid_frame()) {
        // Record source position of the statement as this code which is
        // after the code for the body actually belongs to the loop
        // statement and not the body.
        CodeForStatementPosition(node);
        Visit(node->next());
        loop.Jump();
      }
    }
  }
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  DecrementLoopNesting();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitForInStatement(ForInStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ ForInStatement");
  CodeForStatementPosition(node);

  JumpTarget primitive;
  JumpTarget jsobject;
  JumpTarget fixed_array;
  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
  JumpTarget end_del_check;
  JumpTarget exit;

  // Get the object to enumerate over (converted to JSObject).
  Load(node->enumerable());

  VirtualFrame::SpilledScope spilled_scope(frame_);
  // Both SpiderMonkey and kjs ignore null and undefined in contrast
  // to the specification.  12.6.4 mandates a call to ToObject.
  frame_->EmitPop(r0);
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  exit.Branch(eq);
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(r0, ip);
  exit.Branch(eq);

  // Stack layout in body:
  // [iteration counter (Smi)]
  // [length of array]
  // [FixedArray]
  // [Map or 0]
  // [Object]

  // Check if enumerable is already a JSObject
  __ tst(r0, Operand(kSmiTagMask));
  primitive.Branch(eq);
  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
  jsobject.Branch(hs);

  primitive.Bind();
  frame_->EmitPush(r0);
  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);

  jsobject.Bind();
  // Get the set of properties (as a FixedArray or Map).
  // r0: value to be iterated over
  frame_->EmitPush(r0);  // Push the object being iterated over.

  // Check cache validity in generated code. This is a fast case for
  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
  // guarantee cache validity, call the runtime system to check cache
  // validity or get the property names in a fixed array.
  JumpTarget call_runtime;
  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
  JumpTarget check_prototype;
  JumpTarget use_cache;
  __ mov(r1, Operand(r0));
  loop.Bind();
  // Check that there are no elements.
  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ cmp(r2, r4);
  call_runtime.Branch(ne);
  // Check that instance descriptors are not empty so that we can
  // check for an enum cache.  Leave the map in r3 for the subsequent
  // prototype load.
  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
  __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
  __ cmp(r2, ip);
  call_runtime.Branch(eq);
  // Check that there is an enum cache in the non-empty instance
  // descriptors.  This is the case if the next enumeration index
  // field does not contain a smi.
  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
  __ tst(r2, Operand(kSmiTagMask));
  call_runtime.Branch(eq);
  // For all objects but the receiver, check that the cache is empty.
  // r4: empty fixed array root.
  __ cmp(r1, r0);
  check_prototype.Branch(eq);
  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
  __ cmp(r2, r4);
  call_runtime.Branch(ne);
  check_prototype.Bind();
  // Load the prototype from the map and loop if non-null.
  __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(r1, ip);
  loop.Branch(ne);
  // The enum cache is valid.  Load the map of the object being
  // iterated over and use the cache for the iteration.
  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  use_cache.Jump();

  call_runtime.Bind();
  // Call the runtime to get the property names for the object.
  frame_->EmitPush(r0);  // push the object (slot 4) for the runtime call
  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);

  // If we got a map from the runtime call, we can do a fast
  // modification check. Otherwise, we got a fixed array, and we have
  // to do a slow check.
  // r0: map or fixed array (result from call to
  // Runtime::kGetPropertyNamesFast)
  __ mov(r2, Operand(r0));
  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  fixed_array.Branch(ne);

  use_cache.Bind();
  // Get enum cache
  // r0: map (either the result from a call to
  // Runtime::kGetPropertyNamesFast or has been fetched directly from
  // the object)
  __ mov(r1, Operand(r0));
  __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
  __ ldr(r2,
         FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));

  frame_->EmitPush(r0);  // map
  frame_->EmitPush(r2);  // enum cache bridge cache
  __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
  frame_->EmitPush(r0);
  __ mov(r0, Operand(Smi::FromInt(0)));
  frame_->EmitPush(r0);
  entry.Jump();

  fixed_array.Bind();
  __ mov(r1, Operand(Smi::FromInt(0)));
  frame_->EmitPush(r1);  // insert 0 in place of Map
  frame_->EmitPush(r0);

  // Push the length of the array and the initial index onto the stack.
  __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
  frame_->EmitPush(r0);
  __ mov(r0, Operand(Smi::FromInt(0)));  // init index
  frame_->EmitPush(r0);

  // Condition.
  entry.Bind();
  // sp[0] : index
  // sp[1] : array/enum cache length
  // sp[2] : array or enum cache
  // sp[3] : 0 or map
  // sp[4] : enumerable
  // Grab the current frame's height for the break and continue
  // targets only after all the state is pushed on the frame.
  node->break_target()->SetExpectedHeight();
  node->continue_target()->SetExpectedHeight();

  // Load the current count to r0, load the length to r1.
  __ Ldrd(r0, r1, frame_->ElementAt(0));
  __ cmp(r0, r1);  // compare to the array length
  node->break_target()->Branch(hs);

  // Get the i'th entry of the array.
  __ ldr(r2, frame_->ElementAt(2));
  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));

  // Get Map or 0.
  __ ldr(r2, frame_->ElementAt(3));
  // Check if this (still) matches the map of the enumerable.
  // If not, we have to filter the key.
  __ ldr(r1, frame_->ElementAt(4));
  __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ cmp(r1, Operand(r2));
  end_del_check.Branch(eq);

  // Convert the entry to a string (or null if it isn't a property anymore).
  __ ldr(r0, frame_->ElementAt(4));  // push enumerable
  frame_->EmitPush(r0);
  frame_->EmitPush(r3);  // push entry
  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
  __ mov(r3, Operand(r0), SetCC);
  // If the property has been removed while iterating, we just skip it.
  node->continue_target()->Branch(eq);

  end_del_check.Bind();
  // Store the entry in the 'each' expression and take another spin in the
  // loop.  r3: i'th entry of the enum cache (or string thereof)
  frame_->EmitPush(r3);  // push entry
  { VirtualFrame::RegisterAllocationScope scope(this);
    Reference each(this, node->each());
    if (!each.is_illegal()) {
      if (each.size() > 0) {
        // Loading a reference may leave the frame in an unspilled state.
        frame_->SpillAll();  // Sync stack to memory.
        // Get the value (under the reference on the stack) from memory.
        __ ldr(r0, frame_->ElementAt(each.size()));
        frame_->EmitPush(r0);
        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
        frame_->Drop(2);  // The result of the set and the extra pushed value.
      } else {
        // If the reference was to a slot we rely on the convenient property
        // that it doesn't matter whether a value (eg, the entry pushed above)
        // is right on top of or right underneath a zero-sized reference.
        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
        frame_->Drop(1);  // Drop the result of the set operation.
      }
    }
  }
  // Body.
  CheckStack();  // TODO(1222600): ignore if body contains calls.
  { VirtualFrame::RegisterAllocationScope scope(this);
    Visit(node->body());
  }

  // Next.  Reestablish a spilled frame in case we are coming here via
  // a continue in the body.
  node->continue_target()->Bind();
  frame_->SpillAll();
  frame_->EmitPop(r0);
  __ add(r0, r0, Operand(Smi::FromInt(1)));
  frame_->EmitPush(r0);
  entry.Jump();

  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
  // any frame.
  node->break_target()->Bind();
  frame_->Drop(5);

  // Exit.
  exit.Bind();
  node->continue_target()->Unuse();
  node->break_target()->Unuse();
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment cmnt(masm_, "[ TryCatchStatement");
  CodeForStatementPosition(node);

  JumpTarget try_block;
  JumpTarget exit;

  try_block.Call();
  // --- Catch block ---
  frame_->EmitPush(r0);

  // Store the caught exception in the catch variable.
  Variable* catch_var = node->catch_var()->var();
  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);

  // Remove the exception from the stack.
  frame_->Drop();

  { VirtualFrame::RegisterAllocationScope scope(this);
    VisitStatements(node->catch_block()->statements());
  }
  if (frame_ != NULL) {
    exit.Jump();
  }


  // --- Try block ---
  try_block.Bind();

  frame_->PushTryHandler(TRY_CATCH_HANDLER);
  int handler_height = frame_->height();

  // Shadow the labels for all escapes from the try block, including
  // returns. During shadowing, the original label is hidden as the
  // LabelShadow and operations on the original actually affect the
  // shadowing label.
  //
  // We should probably try to unify the escaping labels and the return
  // label.
  int nof_escapes = node->escaping_targets()->length();
  List<ShadowTarget*> shadows(1 + nof_escapes);

  // Add the shadow target for the function return.
  static const int kReturnShadowIndex = 0;
  shadows.Add(new ShadowTarget(&function_return_));
  bool function_return_was_shadowed = function_return_is_shadowed_;
  function_return_is_shadowed_ = true;
  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);

  // Add the remaining shadow targets.
  for (int i = 0; i < nof_escapes; i++) {
    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
  }

  // Generate code for the statements in the try block.
  { VirtualFrame::RegisterAllocationScope scope(this);
    VisitStatements(node->try_block()->statements());
  }

  // Stop the introduced shadowing and count the number of required unlinks.
  // After shadowing stops, the original labels are unshadowed and the
  // LabelShadows represent the formerly shadowing labels.
  bool has_unlinks = false;
  for (int i = 0; i < shadows.length(); i++) {
    shadows[i]->StopShadowing();
    has_unlinks = has_unlinks || shadows[i]->is_linked();
  }
  function_return_is_shadowed_ = function_return_was_shadowed;

  // Get an external reference to the handler address.
  ExternalReference handler_address(Top::k_handler_address);

  // If we can fall off the end of the try block, unlink from try chain.
  if (has_valid_frame()) {
    // The next handler address is on top of the frame.  Unlink from
    // the handler list and drop the rest of this handler from the
    // frame.
    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
    frame_->EmitPop(r1);  // r0 can contain the return value.
    __ mov(r3, Operand(handler_address));
    __ str(r1, MemOperand(r3));
    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
    if (has_unlinks) {
      exit.Jump();
    }
  }

  // Generate unlink code for the (formerly) shadowing labels that have been
  // jumped to.  Deallocate each shadow target.
  for (int i = 0; i < shadows.length(); i++) {
    if (shadows[i]->is_linked()) {
      // Unlink from try chain;
      shadows[i]->Bind();
      // Because we can be jumping here (to spilled code) from unspilled
      // code, we need to reestablish a spilled frame at this block.
      frame_->SpillAll();

      // Reload sp from the top handler, because some statements that we
      // break from (eg, for...in) may have left stuff on the stack.
      __ mov(r3, Operand(handler_address));
      __ ldr(sp, MemOperand(r3));
      frame_->Forget(frame_->height() - handler_height);

      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
      frame_->EmitPop(r1);  // r0 can contain the return value.
      __ str(r1, MemOperand(r3));
      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);

      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
        frame_->PrepareForReturn();
      }
      shadows[i]->other_target()->Jump();
    }
  }

  exit.Bind();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment cmnt(masm_, "[ TryFinallyStatement");
  CodeForStatementPosition(node);

  // State: Used to keep track of reason for entering the finally
  // block. Should probably be extended to hold information for
  // break/continue from within the try block.
  enum { FALLING, THROWING, JUMPING };

  JumpTarget try_block;
  JumpTarget finally_block;

  try_block.Call();

  frame_->EmitPush(r0);  // save exception object on the stack
  // In case of thrown exceptions, this is where we continue.
  __ mov(r2, Operand(Smi::FromInt(THROWING)));
  finally_block.Jump();

  // --- Try block ---
  try_block.Bind();

  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
  int handler_height = frame_->height();

  // Shadow the labels for all escapes from the try block, including
  // returns.  Shadowing hides the original label as the LabelShadow and
  // operations on the original actually affect the shadowing label.
  //
  // We should probably try to unify the escaping labels and the return
  // label.
  int nof_escapes = node->escaping_targets()->length();
  List<ShadowTarget*> shadows(1 + nof_escapes);

  // Add the shadow target for the function return.
  static const int kReturnShadowIndex = 0;
  shadows.Add(new ShadowTarget(&function_return_));
  bool function_return_was_shadowed = function_return_is_shadowed_;
  function_return_is_shadowed_ = true;
  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);

  // Add the remaining shadow targets.
  for (int i = 0; i < nof_escapes; i++) {
    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
  }

  // Generate code for the statements in the try block.
  { VirtualFrame::RegisterAllocationScope scope(this);
    VisitStatements(node->try_block()->statements());
  }

  // Stop the introduced shadowing and count the number of required unlinks.
  // After shadowing stops, the original labels are unshadowed and the
  // LabelShadows represent the formerly shadowing labels.
  int nof_unlinks = 0;
  for (int i = 0; i < shadows.length(); i++) {
    shadows[i]->StopShadowing();
    if (shadows[i]->is_linked()) nof_unlinks++;
  }
  function_return_is_shadowed_ = function_return_was_shadowed;

kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
2970 2971 2972 2973 2974
  // Get an external reference to the handler address.
  ExternalReference handler_address(Top::k_handler_address);

  // If we can fall off the end of the try block, unlink from the try
  // chain and set the state on the frame to FALLING.
2975
  if (has_valid_frame()) {
2976
    // The next handler address is on top of the frame.
2977
    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2978
    frame_->EmitPop(r1);
kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
2979 2980
    __ mov(r3, Operand(handler_address));
    __ str(r1, MemOperand(r3));
2981
    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
2982 2983 2984

    // Fake a top of stack value (unneeded when FALLING) and set the
    // state in r2, then jump around the unlink blocks if any.
2985
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2986 2987 2988
    frame_->EmitPush(r0);
    __ mov(r2, Operand(Smi::FromInt(FALLING)));
    if (nof_unlinks > 0) {
kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
2989
      finally_block.Jump();
2990 2991
    }
  }
2992

kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
2993
  // Generate code to unlink and set the state for the (formerly)
2994 2995
  // shadowing targets that have been jumped to.
  for (int i = 0; i < shadows.length(); i++) {
2996
    if (shadows[i]->is_linked()) {
kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
2997 2998 2999 3000
      // If we have come from the shadowed return, the return value is
      // in (a non-refcounted reference to) r0.  We must preserve it
      // until it is pushed.
      //
3001 3002 3003 3004 3005
      // Because we can be jumping here (to spilled code) from
      // unspilled code, we need to reestablish a spilled frame at
      // this block.
      shadows[i]->Bind();
      frame_->SpillAll();
kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
3006 3007 3008 3009 3010 3011

      // Reload sp from the top handler, because some statements that
      // we break from (eg, for...in) may have left stuff on the
      // stack.
      __ mov(r3, Operand(handler_address));
      __ ldr(sp, MemOperand(r3));
3012
      frame_->Forget(frame_->height() - handler_height);
kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
3013 3014

      // Unlink this handler and drop it from the frame.  The next
3015
      // handler address is currently on top of the frame.
3016
      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
3017 3018
      frame_->EmitPop(r1);
      __ str(r1, MemOperand(r3));
3019
      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
3020

3021
      if (i == kReturnShadowIndex) {
3022 3023
        // If this label shadowed the function return, materialize the
        // return value on the stack.
3024
        frame_->EmitPush(r0);
3025
      } else {
3026
        // Fake TOS for targets that shadowed breaks and continues.
3027
        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3028
        frame_->EmitPush(r0);
3029 3030
      }
      __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
3031 3032 3033 3034
      if (--nof_unlinks > 0) {
        // If this is not the last unlink block, jump around the next.
        finally_block.Jump();
      }
3035 3036 3037
    }
  }

3038
  // --- Finally block ---
3039
  finally_block.Bind();
3040

3041
  // Push the state on the stack.
3042
  frame_->EmitPush(r2);
3043

3044
  // We keep two elements on the stack - the (possibly faked) result
3045 3046
  // and the state - while evaluating the finally block.
  //
3047
  // Generate code for the statements in the finally block.
3048 3049 3050
  { VirtualFrame::RegisterAllocationScope scope(this);
    VisitStatements(node->finally_block()->statements());
  }
3051

3052 3053 3054 3055
  if (has_valid_frame()) {
    // Restore state and return value or faked TOS.
    frame_->EmitPop(r2);
    frame_->EmitPop(r0);
3056
  }
3057

3058 3059 3060 3061 3062 3063 3064
  // Generate code to jump to the right destination for all used
  // formerly shadowing targets.  Deallocate each shadow target.
  for (int i = 0; i < shadows.length(); i++) {
    if (has_valid_frame() && shadows[i]->is_bound()) {
      JumpTarget* original = shadows[i]->other_target();
      __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
3065
        JumpTarget skip;
3066 3067 3068 3069 3070 3071
        skip.Branch(ne);
        frame_->PrepareForReturn();
        original->Jump();
        skip.Bind();
      } else {
        original->Branch(eq);
3072
      }
3073
    }
3074
  }
3075

3076
  if (has_valid_frame()) {
3077
    // Check if we need to rethrow the exception.
3078
    JumpTarget exit;
3079 3080
    __ cmp(r2, Operand(Smi::FromInt(THROWING)));
    exit.Branch(ne);
3081

3082 3083 3084
    // Rethrow exception.
    frame_->EmitPush(r0);
    frame_->CallRuntime(Runtime::kReThrow, 1);
3085

3086 3087 3088 3089
    // Done.
    exit.Bind();
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
3090 3091 3092
}


void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ DebuggerStatement");
  CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
  frame_->DebugBreak();
#endif
  // Ignore the return value.
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::InstantiateFunction(
    Handle<SharedFunctionInfo> function_info) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  if (scope()->is_function_scope() && function_info->num_literals() == 0) {
    FastNewClosureStub stub;
    frame_->EmitPush(Operand(function_info));
    frame_->SpillAll();
    frame_->CallStub(&stub, 1);
    frame_->EmitPush(r0);
  } else {
    // Create a new closure.
    frame_->EmitPush(cp);
    frame_->EmitPush(Operand(function_info));
    frame_->CallRuntime(Runtime::kNewClosure, 2);
    frame_->EmitPush(r0);
  }
}


void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ FunctionLiteral");

  // Build the function info and instantiate it.
  Handle<SharedFunctionInfo> function_info =
      Compiler::BuildFunctionInfo(node, script());
  if (function_info.is_null()) {
    SetStackOverflow();
    ASSERT(frame_->height() == original_height);
    return;
  }
  InstantiateFunction(function_info);
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitSharedFunctionInfoLiteral(
    SharedFunctionInfoLiteral* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
  InstantiateFunction(node->shared_function_info());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitConditional(Conditional* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Conditional");
  JumpTarget then;
  JumpTarget else_;
  LoadCondition(node->condition(), &then, &else_, true);
  if (has_valid_frame()) {
    Branch(false, &else_);
  }
  if (has_valid_frame() || then.is_linked()) {
    then.Bind();
    Load(node->then_expression());
  }
  if (else_.is_linked()) {
    JumpTarget exit;
    if (has_valid_frame()) exit.Jump();
    else_.Bind();
    Load(node->else_expression());
    if (exit.is_linked()) exit.Bind();
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
  if (slot->type() == Slot::LOOKUP) {
    ASSERT(slot->var()->is_dynamic());

    // JumpTargets do not yet support merging frames so the frame must be
    // spilled when jumping to these targets.
    JumpTarget slow;
    JumpTarget done;

    // Generate fast case for loading from slots that correspond to
    // local/global variables or arguments unless they are shadowed by
    // eval-introduced bindings.
    EmitDynamicLoadFromSlotFastCase(slot,
                                    typeof_state,
                                    &slow,
                                    &done);

    slow.Bind();
    frame_->EmitPush(cp);
    frame_->EmitPush(Operand(slot->var()->name()));

    if (typeof_state == INSIDE_TYPEOF) {
      frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
    } else {
      frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
    }

    done.Bind();
    frame_->EmitPush(r0);

  } else {
    Register scratch = VirtualFrame::scratch0();
    TypeInfo info = type_info(slot);
    frame_->EmitPush(SlotOperand(slot, scratch), info);

    if (slot->var()->mode() == Variable::CONST) {
      // Const slots may contain 'the hole' value (the constant hasn't been
      // initialized yet) which needs to be converted into the 'undefined'
      // value.
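      // For example (illustrative): in
      //   f(); const c = 1; function f() { use(c); }
      // the read of c happens before the initialization, so the slot still
      // holds the hole and the load must yield 'undefined' instead.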
      Comment cmnt(masm_, "[ Unhole const");
      Register tos = frame_->PopToRegister();
      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
      __ cmp(tos, ip);
      __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
      frame_->EmitPush(tos);
    }
  }
}


void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
                                                  TypeofState state) {
  VirtualFrame::RegisterAllocationScope scope(this);
  LoadFromSlot(slot, state);

  // Bail out quickly if we're not using lazy arguments allocation.
  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;

  // ... or if the slot isn't a non-parameter arguments slot.
  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;

  // Load the loaded value from the stack into a register but leave it on the
  // stack.
  Register tos = frame_->Peek();

  // If the loaded value is the sentinel that indicates that we
  // haven't loaded the arguments object yet, we need to do it now.
  JumpTarget exit;
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(tos, ip);
  exit.Branch(ne);
  frame_->Drop();
  StoreArgumentsObject(false);
  exit.Bind();
}


void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
  ASSERT(slot != NULL);
  VirtualFrame::RegisterAllocationScope scope(this);
  if (slot->type() == Slot::LOOKUP) {
    ASSERT(slot->var()->is_dynamic());

    // For now, just do a runtime call.
    frame_->EmitPush(cp);
    frame_->EmitPush(Operand(slot->var()->name()));

    if (init_state == CONST_INIT) {
      // Same as the case for a normal store, but ignores attribute
      // (e.g. READ_ONLY) of context slot so that we can initialize
      // const properties (introduced via eval("const foo = (some
      // expr);")). Also, uses the current function context instead of
      // the top context.
      //
      // Note that we must declare the foo upon entry of eval(), via a
      // context slot declaration, but we cannot initialize it at the
      // same time, because the const declaration may be at the end of
      // the eval code (sigh...) and the const variable may have been
      // used before (where its value is 'undefined'). Thus, we can only
      // do the initialization when we actually encounter the expression
      // and when the expression operands are defined and valid, and
      // thus we need the split into 2 operations: declaration of the
      // context slot followed by initialization.
      frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
    } else {
      frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
    }
    // Storing a variable must keep the (new) value on the expression
    // stack. This is necessary for compiling assignment expressions.
    frame_->EmitPush(r0);

  } else {
    ASSERT(!slot->var()->is_dynamic());
    Register scratch = VirtualFrame::scratch0();
    Register scratch2 = VirtualFrame::scratch1();

    // The frame must be spilled when branching to this target.
    JumpTarget exit;

    if (init_state == CONST_INIT) {
      ASSERT(slot->var()->mode() == Variable::CONST);
      // Only the first const initialization must be executed (the slot
      // still contains 'the hole' value). When the assignment is
      // executed, the code is identical to a normal store (see below).
      Comment cmnt(masm_, "[ Init const");
      __ ldr(scratch, SlotOperand(slot, scratch));
      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
      __ cmp(scratch, ip);
      exit.Branch(ne);
    }

    // We must execute the store.  Storing a variable must keep the
    // (new) value on the stack. This is necessary for compiling
    // assignment expressions.
    //
    // Note: We will reach here even with slot->var()->mode() ==
    // Variable::CONST because of const declarations which will
    // initialize consts to 'the hole' value and by doing so, end up
    // calling this code.  r2 may be loaded with context; used below in
    // RecordWrite.
    Register tos = frame_->Peek();
    __ str(tos, SlotOperand(slot, scratch));
    if (slot->type() == Slot::CONTEXT) {
      // Skip write barrier if the written value is a smi.
      __ tst(tos, Operand(kSmiTagMask));
      // We don't use tos any more after here.
      exit.Branch(eq);
      // scratch is loaded with context when calling SlotOperand above.
      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
      // We need an extra register.  Until we have a way to do that in the
      // virtual frame we will cheat and ask for a free TOS register.
      Register scratch3 = frame_->GetTOSRegister();
      __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
    }
    // If we definitely did not jump over the assignment, we do not need
    // to bind the exit label.  Doing so can defeat peephole
    // optimization.
    if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
      exit.Bind();
    }
  }
}


void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                                      TypeofState typeof_state,
                                                      JumpTarget* slow) {
  // Check that no extension objects have been created by calls to
  // eval from the current scope to the global scope.
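  // Illustrative example (not part of the original comment): in
  //   function f() { eval(s); return x; }   // x lives in an outer scope
  // the eval may have introduced a binding for x in a context extension
  // object, so each context between here and the global context that calls
  // eval must be checked before the global load below can be used.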
  Register tmp = frame_->scratch0();
  Register tmp2 = frame_->scratch1();
  Register context = cp;
  Scope* s = scope();
  while (s != NULL) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        frame_->SpillAll();
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      // Load next context in chain.
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
    // If no outer scope calls eval, we do not need to check more
    // context extensions.
    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
    s = s->outer_scope();
  }

  if (s->is_eval_scope()) {
    frame_->SpillAll();
    Label next, fast;
    __ Move(tmp, context);
    __ bind(&next);
    // Terminate at global context.
    __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    __ cmp(tmp2, ip);
    __ b(eq, &fast);
    // Check that extension is NULL.
    __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
    __ tst(tmp2, tmp2);
    slow->Branch(ne);
    // Load next context in chain.
    __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
    __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
    __ b(&next);
    __ bind(&fast);
  }

  // Load the global object.
  LoadGlobal();
  // Setup the name register and call load IC.
  frame_->CallLoadIC(slot->var()->name(),
                     typeof_state == INSIDE_TYPEOF
                         ? RelocInfo::CODE_TARGET
                         : RelocInfo::CODE_TARGET_CONTEXT);
}


void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
                                                    TypeofState typeof_state,
                                                    JumpTarget* slow,
                                                    JumpTarget* done) {
  // Generate fast-case code for variables that might be shadowed by
  // eval-introduced variables.  Eval is used a lot without
  // introducing variables.  In those cases, we do not want to
  // perform a runtime call for all variables in the scope
  // containing the eval.
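  // Illustrative case (not part of the original comment): in
  //   function f(a) { eval(s); return a; }
  // the lookup of 'a' is dynamic because of the eval, but 'a' still has a
  // known slot, so the fast paths below load it directly and only the slow
  // path falls back to a runtime lookup.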
  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
    LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
    frame_->SpillAll();
    done->Jump();

  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
    frame_->SpillAll();
    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
    if (potential_slot != NULL) {
      // Generate fast case for locals that rewrite to slots.
      __ ldr(r0,
             ContextSlotOperandCheckExtensions(potential_slot,
                                               r1,
                                               r2,
                                               slow));
      if (potential_slot->var()->mode() == Variable::CONST) {
        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
        __ cmp(r0, ip);
        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
      }
      done->Jump();
    } else if (rewrite != NULL) {
      // Generate fast case for argument loads.
      Property* property = rewrite->AsProperty();
      if (property != NULL) {
        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
        Literal* key_literal = property->key()->AsLiteral();
        if (obj_proxy != NULL &&
            key_literal != NULL &&
            obj_proxy->IsArguments() &&
            key_literal->handle()->IsSmi()) {
          // Load arguments object if there are no eval-introduced
          // variables. Then load the argument from the arguments
          // object using keyed load.
          __ ldr(r0,
                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
                                                   r1,
                                                   r2,
                                                   slow));
          frame_->EmitPush(r0);
          __ mov(r1, Operand(key_literal->handle()));
          frame_->EmitPush(r1);
          EmitKeyedLoad();
          done->Jump();
        }
      }
    }
  }
}


void CodeGenerator::VisitSlot(Slot* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Slot");
  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ VariableProxy");

  Variable* var = node->var();
  Expression* expr = var->rewrite();
  if (expr != NULL) {
    Visit(expr);
  } else {
    ASSERT(var->is_global());
    Reference ref(this, node);
    ref.GetValue();
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitLiteral(Literal* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Literal");
  Register reg = frame_->GetTOSRegister();
  bool is_smi = node->handle()->IsSmi();
  __ mov(reg, Operand(node->handle()));
  frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ RegExp Literal");

  Register tmp = VirtualFrame::scratch0();
  // Free up a TOS register that can be used to push the literal.
  Register literal = frame_->GetTOSRegister();

  // Retrieve the literal array and check the allocated entry.

  // Load the function of this activation.
  __ ldr(tmp, frame_->Function());

  // Load the literals array of the function.
  __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));

  // Load the literal at the ast saved index.
  int literal_offset =
      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
  __ ldr(literal, FieldMemOperand(tmp, literal_offset));

  JumpTarget materialized;
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(literal, ip);
  // This branch locks the virtual frame at the done label to match the
  // one we have here, where the literal register is not on the stack and
  // nothing is spilled.
  materialized.Branch(ne);

  // If the entry is undefined we call the runtime system to compute
  // the literal.
  // literal array  (0)
  frame_->EmitPush(tmp);
  // literal index  (1)
  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
  // RegExp pattern (2)
  frame_->EmitPush(Operand(node->pattern()));
  // RegExp flags   (3)
  frame_->EmitPush(Operand(node->flags()));
  frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
  __ Move(literal, r0);

  materialized.Bind();

  frame_->EmitPush(literal);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  frame_->EmitPush(Operand(Smi::FromInt(size)));
  frame_->CallRuntime(Runtime::kAllocateInNewSpace, 1);
  // TODO(lrn): Use AllocateInNewSpace macro with fallback to runtime.
  // r0 is newly allocated space.

  // Reuse literal variable with (possibly) a new register, still holding
  // the materialized boilerplate.
  literal = frame_->PopToRegister(r0);

  __ CopyFields(r0, literal, tmp.bit(), size / kPointerSize);

  // Push the clone.
  frame_->EmitPush(r0);
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ ObjectLiteral");
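  // JavaScript example (illustrative):
  //   { a: 1, b: g(), get c() { return 0; }, __proto__: p }
  // The constant property comes from the boilerplate; the computed, getter
  // and prototype entries are set by the code generated below.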

  Register literal = frame_->GetTOSRegister();
  // Load the function of this activation.
  __ ldr(literal, frame_->Function());
  // Literal array.
  __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
  frame_->EmitPush(literal);
  // Literal index.
  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
  // Constant properties.
  frame_->EmitPush(Operand(node->constant_properties()));
  // Should the object literal have fast elements?
  frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
  if (node->depth() > 1) {
    frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
  } else {
    frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
  }
  frame_->EmitPush(r0);  // save the result
  for (int i = 0; i < node->properties()->length(); i++) {
    // At the start of each iteration, the top of stack contains
    // the newly created object literal.
    ObjectLiteral::Property* property = node->properties()->at(i);
    Literal* key = property->key();
    Expression* value = property->value();
    switch (property->kind()) {
      case ObjectLiteral::Property::CONSTANT:
        break;
      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
        // else fall through
      case ObjectLiteral::Property::COMPUTED:
        if (key->handle()->IsSymbol()) {
          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
          Load(value);
          frame_->PopToR0();
          // Fetch the object literal.
          frame_->SpillAllButCopyTOSToR1();
          __ mov(r2, Operand(key->handle()));
          frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
          break;
        }
        // else fall through
      case ObjectLiteral::Property::PROTOTYPE: {
        frame_->Dup();
        Load(key);
        Load(value);
        frame_->CallRuntime(Runtime::kSetProperty, 3);
        break;
      }
      case ObjectLiteral::Property::SETTER: {
        frame_->Dup();
        Load(key);
        frame_->EmitPush(Operand(Smi::FromInt(1)));
        Load(value);
        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
        break;
      }
      case ObjectLiteral::Property::GETTER: {
        frame_->Dup();
        Load(key);
        frame_->EmitPush(Operand(Smi::FromInt(0)));
        Load(value);
        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
        break;
      }
    }
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ ArrayLiteral");
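  // JavaScript example (illustrative): for [1, 2, g()] the constant elements
  // come from the boilerplate and only the element produced by g() needs the
  // store emitted at the end of this function.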

  Register tos = frame_->GetTOSRegister();
  // Load the function of this activation.
  __ ldr(tos, frame_->Function());
  // Load the literals array of the function.
  __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
  frame_->EmitPush(tos);
  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
  frame_->EmitPush(Operand(node->constant_elements()));
  int length = node->values()->length();
  if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
    FastCloneShallowArrayStub stub(
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
    frame_->CallStub(&stub, 3);
    __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
  } else if (node->depth() > 1) {
    frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
  } else {
    FastCloneShallowArrayStub stub(
        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
    frame_->CallStub(&stub, 3);
  }
  frame_->EmitPush(r0);  // save the result
  // r0: created object literal

  // Generate code to set the elements in the array that are not
  // literals.
  for (int i = 0; i < node->values()->length(); i++) {
    Expression* value = node->values()->at(i);

    // If value is a literal the property value is already set in the
    // boilerplate object.
    if (value->AsLiteral() != NULL) continue;
    // If value is a materialized literal the property value is already set
    // in the boilerplate object if it is simple.
    if (CompileTimeValue::IsCompileTimeValue(value)) continue;

    // The property must be set by generated code.
    Load(value);
    frame_->PopToR0();
    // Fetch the object literal.
    frame_->SpillAllButCopyTOSToR1();

    // Get the elements array.
    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));

    // Write to the indexed properties array.
    int offset = i * kPointerSize + FixedArray::kHeaderSize;
    __ str(r0, FieldMemOperand(r1, offset));

    // Update the write barrier for the array address.
    __ RecordWrite(r1, Operand(offset), r3, r2);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  // Call runtime routine to allocate the catch extension object and
  // assign the exception value to the catch variable.
  Comment cmnt(masm_, "[ CatchExtensionObject");
  Load(node->key());
  Load(node->value());
  frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
  frame_->EmitPush(r0);
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::EmitSlotAssignment(Assignment* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm(), "[ Variable Assignment");
  Variable* var = node->target()->AsVariableProxy()->AsVariable();
  ASSERT(var != NULL);
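  // Covers assignments such as 'x = value' and compound forms like
  // 'x += value' where x is a parameter, local, or context variable;
  // global variables take the named-property path instead (illustrative
  // summary, see VisitAssignment).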
  Slot* slot = var->AsSlot();
  ASSERT(slot != NULL);

  // Evaluate the right-hand side.
  if (node->is_compound()) {
    // For a compound assignment the right-hand side is a binary operation
    // between the current property value and the actual right-hand side.
    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);

    // Perform the binary operation.
    Literal* literal = node->value()->AsLiteral();
    bool overwrite_value = node->value()->ResultOverwriteAllowed();
    if (literal != NULL && literal->handle()->IsSmi()) {
      SmiOperation(node->binary_op(),
                   literal->handle(),
                   false,
                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
    } else {
      GenerateInlineSmi inline_smi =
          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
      if (literal != NULL) {
        ASSERT(!literal->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      Load(node->value());
      GenericBinaryOperation(node->binary_op(),
                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
                             inline_smi);
    }
  } else {
    Load(node->value());
  }

  // Perform the assignment.
  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
    CodeForSourcePosition(node->position());
    StoreToSlot(slot,
                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm(), "[ Named Property Assignment");
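  // Covers stores of the form 'obj.foo = value' and, for global variables,
  // 'foo = value', which is emitted as a named store on the global object
  // (illustrative summary of the cases handled below).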
  Variable* var = node->target()->AsVariableProxy()->AsVariable();
  Property* prop = node->target()->AsProperty();
  ASSERT(var == NULL || (prop == NULL && var->is_global()));

  // Initialize name and evaluate the receiver sub-expression if necessary. If
  // the receiver is trivial it is not placed on the stack at this point, but
  // loaded whenever actually needed.
  Handle<String> name;
  bool is_trivial_receiver = false;
  if (var != NULL) {
    name = var->name();
  } else {
    Literal* lit = prop->key()->AsLiteral();
    ASSERT_NOT_NULL(lit);
    name = Handle<String>::cast(lit->handle());
    // Do not materialize the receiver on the frame if it is trivial.
    is_trivial_receiver = prop->obj()->IsTrivial();
    if (!is_trivial_receiver) Load(prop->obj());
  }

  // Change to slow case in the beginning of an initialization block to
  // avoid the quadratic behavior of repeatedly adding fast properties.
  if (node->starts_initialization_block()) {
    // Initialization block consists of assignments of the form expr.x = ..., so
    // this will never be an assignment to a variable, so there must be a
    // receiver object.
    ASSERT_EQ(NULL, var);
    if (is_trivial_receiver) {
      Load(prop->obj());
    } else {
      frame_->Dup();
    }
    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
  }

  // Change to fast case at the end of an initialization block. To prepare for
  // that add an extra copy of the receiver to the frame, so that it can be
  // converted back to fast case after the assignment.
  if (node->ends_initialization_block() && !is_trivial_receiver) {
    frame_->Dup();
  }

  // Stack layout:
  // [tos]   : receiver (only materialized if non-trivial)
  // [tos+1] : receiver if at the end of an initialization block

  // Evaluate the right-hand side.
  if (node->is_compound()) {
    // For a compound assignment the right-hand side is a binary operation
    // between the current property value and the actual right-hand side.
    if (is_trivial_receiver) {
      Load(prop->obj());
    } else if (var != NULL) {
      LoadGlobal();
    } else {
      frame_->Dup();
    }
    EmitNamedLoad(name, var != NULL);

    // Perform the binary operation.
    Literal* literal = node->value()->AsLiteral();
    bool overwrite_value = node->value()->ResultOverwriteAllowed();
    if (literal != NULL && literal->handle()->IsSmi()) {
      SmiOperation(node->binary_op(),
                   literal->handle(),
                   false,
                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
    } else {
      GenerateInlineSmi inline_smi =
          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
      if (literal != NULL) {
        ASSERT(!literal->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      Load(node->value());
      GenericBinaryOperation(node->binary_op(),
                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
                             inline_smi);
    }
  } else {
    // For non-compound assignment just load the right-hand side.
    Load(node->value());
  }

  // Stack layout:
  // [tos]   : value
  // [tos+1] : receiver (only materialized if non-trivial)
  // [tos+2] : receiver if at the end of an initialization block

  // Perform the assignment.  It is safe to ignore constants here.
  ASSERT(var == NULL || var->mode() != Variable::CONST);
  ASSERT_NE(Token::INIT_CONST, node->op());
  if (is_trivial_receiver) {
    // Load the receiver and swap with the value.
    Load(prop->obj());
    Register t0 = frame_->PopToRegister();
    Register t1 = frame_->PopToRegister(t0);
    frame_->EmitPush(t0);
    frame_->EmitPush(t1);
  }
  CodeForSourcePosition(node->position());
  bool is_contextual = (var != NULL);
  EmitNamedStore(name, is_contextual);
  frame_->EmitPush(r0);

  // Change to fast case at the end of an initialization block.
  if (node->ends_initialization_block()) {
    ASSERT_EQ(NULL, var);
    // The argument to the runtime call is the receiver.
    if (is_trivial_receiver) {
      Load(prop->obj());
    } else {
      // A copy of the receiver is below the value of the assignment. Swap
      // the receiver and the value of the assignment expression.
      Register t0 = frame_->PopToRegister();
      Register t1 = frame_->PopToRegister(t0);
      frame_->EmitPush(t0);
      frame_->EmitPush(t1);
    }
    frame_->CallRuntime(Runtime::kToFastProperties, 1);
  }

  // Stack layout:
  // [tos]   : result

  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Keyed Property Assignment");
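  // Covers stores of the form 'obj[key] = value' as well as compound forms
  // such as 'obj[key] += value' (illustrative summary of the code below).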
  Property* prop = node->target()->AsProperty();
  ASSERT_NOT_NULL(prop);

  // Evaluate the receiver subexpression.
  Load(prop->obj());

  WriteBarrierCharacter wb_info;

  // Change to slow case in the beginning of an initialization block to
  // avoid the quadratic behavior of repeatedly adding fast properties.
  if (node->starts_initialization_block()) {
    frame_->Dup();
    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
  }

  // Change to fast case at the end of an initialization block. To prepare for
  // that add an extra copy of the receiver to the frame, so that it can be
  // converted back to fast case after the assignment.
  if (node->ends_initialization_block()) {
    frame_->Dup();
  }

  // Evaluate the key subexpression.
  Load(prop->key());

  // Stack layout:
  // [tos]   : key
  // [tos+1] : receiver
  // [tos+2] : receiver if at the end of an initialization block
  //
  // Evaluate the right-hand side.
  if (node->is_compound()) {
    // For a compound assignment the right-hand side is a binary operation
    // between the current property value and the actual right-hand side.
    // Duplicate receiver and key for loading the current property value.
    frame_->Dup2();
    EmitKeyedLoad();
    frame_->EmitPush(r0);

    // Perform the binary operation.
    Literal* literal = node->value()->AsLiteral();
    bool overwrite_value = node->value()->ResultOverwriteAllowed();
    if (literal != NULL && literal->handle()->IsSmi()) {
      SmiOperation(node->binary_op(),
                   literal->handle(),
                   false,
                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
    } else {
      GenerateInlineSmi inline_smi =
          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
      if (literal != NULL) {
        ASSERT(!literal->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      Load(node->value());
      GenericBinaryOperation(node->binary_op(),
                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
                             inline_smi);
    }
    wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
  } else {
    // For non-compound assignment just load the right-hand side.
    Load(node->value());
    wb_info = node->value()->AsLiteral() != NULL ?
        NEVER_NEWSPACE :
        (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
  }

  // Stack layout:
  // [tos]   : value
  // [tos+1] : key
  // [tos+2] : receiver
  // [tos+3] : receiver if at the end of an initialization block

  // Perform the assignment.  It is safe to ignore constants here.
  ASSERT(node->op() != Token::INIT_CONST);
  CodeForSourcePosition(node->position());
  EmitKeyedStore(prop->key()->type(), wb_info);
  frame_->EmitPush(r0);

  // Stack layout:
  // [tos]   : result
  // [tos+1] : receiver if at the end of an initialization block

  // Change to fast case at the end of an initialization block.
  if (node->ends_initialization_block()) {
    // The argument to the runtime call is the extra copy of the receiver,
    // which is below the value of the assignment.  Swap the receiver and
    // the value of the assignment expression.
    Register t0 = frame_->PopToRegister();
    Register t1 = frame_->PopToRegister(t0);
    frame_->EmitPush(t1);
    frame_->EmitPush(t0);
    frame_->CallRuntime(Runtime::kToFastProperties, 1);
  }

  // Stack layout:
  // [tos]   : result

  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitAssignment(Assignment* node) {
  VirtualFrame::RegisterAllocationScope scope(this);
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Assignment");

  Variable* var = node->target()->AsVariableProxy()->AsVariable();
  Property* prop = node->target()->AsProperty();

  if (var != NULL && !var->is_global()) {
    EmitSlotAssignment(node);

  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
             (var != NULL && var->is_global())) {
    // Properties whose keys are property names and global variables are
    // treated as named property references.  We do not need to consider
    // global 'this' because it is not a valid left-hand side.
    EmitNamedPropertyAssignment(node);

  } else if (prop != NULL) {
    // Other properties (including rewritten parameters for a function that
    // uses arguments) are keyed property assignments.
    EmitKeyedPropertyAssignment(node);

  } else {
    // Invalid left-hand side.
    Load(node->target());
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
    // The runtime call doesn't actually return but the code generator will
    // still generate code and expects a certain frame height.
    frame_->EmitPush(r0);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitThrow(Throw* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Throw");

  Load(node->exception());
  CodeForSourcePosition(node->position());
  frame_->CallRuntime(Runtime::kThrow, 1);
  frame_->EmitPush(r0);
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitProperty(Property* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Property");

  { Reference property(this, node);
    property.GetValue();
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitCall(Call* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Call");

  Expression* function = node->expression();
  ZoneList<Expression*>* args = node->arguments();

  // Standard function call.
  // Check if the function is a variable or a property.
  Variable* var = function->AsVariableProxy()->AsVariable();
  Property* property = function->AsProperty();

  // ------------------------------------------------------------------------
  // Fast-case: Use inline caching.
  // ---
  // According to ECMA-262, section 11.2.3, page 44, the function to call
  // must be resolved after the arguments have been evaluated. The IC code
  // automatically handles this by loading the arguments before the function
  // is resolved in cache misses (this also holds for megamorphic calls).
  // ------------------------------------------------------------------------

  if (var != NULL && var->is_possibly_eval()) {
    // ----------------------------------
    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
    // ----------------------------------

    // In a call to eval, we first call %ResolvePossiblyDirectEval to
    // resolve the function we need to call and the receiver of the
    // call.  Then we call the resolved function using the given
    // arguments.

    // Prepare stack for call to resolved function.
    Load(function);

    // Allocate a frame slot for the receiver.
    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);

    // Load the arguments.
    int arg_count = args->length();
    for (int i = 0; i < arg_count; i++) {
      Load(args->at(i));
    }

    VirtualFrame::SpilledScope spilled_scope(frame_);

    // If we know that eval can only be shadowed by eval-introduced
    // variables we attempt to load the global eval function directly
    // in generated code. If we succeed, there is no need to perform a
    // context lookup in the runtime system.
    JumpTarget done;
    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
      JumpTarget slow;
      // Prepare the stack for the call to
      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
      // function, the first argument to the eval call and the
      // receiver.
      LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
                                        NOT_INSIDE_TYPEOF,
                                        &slow);
      frame_->EmitPush(r0);
      if (arg_count > 0) {
        __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
        frame_->EmitPush(r1);
      } else {
        frame_->EmitPush(r2);
      }
      __ ldr(r1, frame_->Receiver());
      frame_->EmitPush(r1);

      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);

      done.Jump();
      slow.Bind();
    }

    // Prepare the stack for the call to ResolvePossiblyDirectEval by
    // pushing the loaded function, the first argument to the eval
    // call and the receiver.
    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
    frame_->EmitPush(r1);
    if (arg_count > 0) {
      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
      frame_->EmitPush(r1);
    } else {
      frame_->EmitPush(r2);
    }
    __ ldr(r1, frame_->Receiver());
    frame_->EmitPush(r1);

    // Resolve the call.
    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);

    // If we generated fast-case code bind the jump-target where fast
    // and slow case merge.
    if (done.is_linked()) done.Bind();

    // Touch up stack with the right values for the function and the receiver.
    __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
    __ str(r1, MemOperand(sp, arg_count * kPointerSize));

    // Call the function.
    CodeForSourcePosition(node->position());

    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
    frame_->CallStub(&call_function, arg_count + 1);

    __ ldr(cp, frame_->Context());
    // Remove the function from the stack.
    frame_->Drop();
    frame_->EmitPush(r0);

  } else if (var != NULL && !var->is_this() && var->is_global()) {
4203 4204 4205
    // ----------------------------------
    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
    // ----------------------------------
4206 4207 4208 4209
    // Pass the global object as the receiver and let the IC stub
    // patch the stack to use the global proxy as 'this' in the
    // invoked function.
    LoadGlobal();
4210

4211
    // Load the arguments.
4212 4213
    int arg_count = args->length();
    for (int i = 0; i < arg_count; i++) {
4214
      Load(args->at(i));
4215
    }
4216

4217
    VirtualFrame::SpilledScope spilled_scope(frame_);
4218 4219
    // Setup the name register and call the IC initialization code.
    __ mov(r2, Operand(var->name()));
4220 4221
    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
4222
    CodeForSourcePosition(node->position());
4223 4224
    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
                           arg_count + 1);
4225
    __ ldr(cp, frame_->Context());
4226
    frame_->EmitPush(r0);
4227

4228 4229
  } else if (var != NULL && var->AsSlot() != NULL &&
             var->AsSlot()->type() == Slot::LOOKUP) {
4230
    // ----------------------------------
4231 4232
    // JavaScript examples:
    //
4233
    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
4234 4235 4236 4237
    //
    //  function f() {};
    //  function g() {
    //    eval(...);
4238
    //    f();  // f could be in extension object.
4239
    //  }
4240
    // ----------------------------------
4241

4242
    JumpTarget slow, done;
4243

4244 4245 4246
    // Generate fast case for loading functions from slots that
    // correspond to local/global variables or arguments unless they
    // are shadowed by eval-introduced bindings.
4247
    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
4248 4249 4250
                                    NOT_INSIDE_TYPEOF,
                                    &slow,
                                    &done);
4251 4252

    slow.Bind();
4253
    // Load the function
4254
    frame_->EmitPush(cp);
4255
    frame_->EmitPush(Operand(var->name()));
4256
    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4257
    // r0: slot value; r1: receiver
4258

4259
    // Load the receiver.
4260 4261
    frame_->EmitPush(r0);  // function
    frame_->EmitPush(r1);  // receiver
4262

4263 4264 4265 4266 4267 4268 4269 4270
    // If fast case code has been generated, emit code to push the
    // function and receiver and have the slow path jump around this
    // code.
    if (done.is_linked()) {
      JumpTarget call;
      call.Jump();
      done.Bind();
      frame_->EmitPush(r0);  // function
4271
      LoadGlobalReceiver(VirtualFrame::scratch0());  // receiver
4272 4273 4274
      call.Bind();
    }

4275 4276
    // Call the function. At this point, everything is spilled but the
    // function and receiver are in r0 and r1.
4277
    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
4278
    frame_->EmitPush(r0);
4279

4280 4281 4282
  } else if (property != NULL) {
    // Check if the key is a literal string.
    Literal* literal = property->key()->AsLiteral();
4283

4284 4285 4286 4287
    if (literal != NULL && literal->handle()->IsSymbol()) {
      // ------------------------------------------------------------------
      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
      // ------------------------------------------------------------------
4288

4289
      Handle<String> name = Handle<String>::cast(literal->handle());
4290

4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303
      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
          name->IsEqualTo(CStrVector("apply")) &&
          args->length() == 2 &&
          args->at(1)->AsVariableProxy() != NULL &&
          args->at(1)->AsVariableProxy()->IsArguments()) {
        // Use the optimized Function.prototype.apply that avoids
        // allocating lazily allocated arguments objects.
        CallApplyLazy(property->obj(),
                      args->at(0),
                      args->at(1)->AsVariableProxy(),
                      node->position());

      } else {
4304
        Load(property->obj());  // Receiver.
4305 4306 4307
        // Load the arguments.
        int arg_count = args->length();
        for (int i = 0; i < arg_count; i++) {
4308
          Load(args->at(i));
4309 4310
        }

4311
        VirtualFrame::SpilledScope spilled_scope(frame_);
4312 4313 4314 4315 4316 4317 4318 4319 4320
        // Set the name register and call the IC initialization code.
        __ mov(r2, Operand(name));
        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
        Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
        CodeForSourcePosition(node->position());
        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
        __ ldr(cp, frame_->Context());
        frame_->EmitPush(r0);
      }
4321

4322 4323 4324 4325
    } else {
      // -------------------------------------------
      // JavaScript example: 'array[index](1, 2, 3)'
      // -------------------------------------------
4326
      Load(property->obj());
4327
      if (property->is_synthetic()) {
4328 4329 4330
        Load(property->key());
        EmitKeyedLoad();
        // Put the function below the receiver.
4331
        // Use the global receiver.
4332
        frame_->EmitPush(r0);  // Function.
4333
        LoadGlobalReceiver(VirtualFrame::scratch0());
4334 4335 4336
        // Call the function.
        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
        frame_->EmitPush(r0);
4337
      } else {
4338 4339 4340 4341 4342
        // Load the arguments.
        int arg_count = args->length();
        for (int i = 0; i < arg_count; i++) {
          Load(args->at(i));
        }
4343

4344 4345
        // Set the name register and call the IC initialization code.
        Load(property->key());
4346
        frame_->SpillAll();
4347 4348 4349 4350 4351 4352 4353 4354 4355
        frame_->EmitPop(r2);  // Function name.

        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
        Handle<Code> stub = ComputeKeyedCallInitialize(arg_count, in_loop);
        CodeForSourcePosition(node->position());
        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
        __ ldr(cp, frame_->Context());
        frame_->EmitPush(r0);
      }
4356
    }
4357

4358 4359 4360 4361
  } else {
    // ----------------------------------
    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
    // ----------------------------------
4362

4363
    // Load the function.
4364 4365
    Load(function);

4366
    // Pass the global proxy as the receiver.
    LoadGlobalReceiver(VirtualFrame::scratch0());

    // Call the function.
    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
    frame_->EmitPush(r0);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ CallNew");

  // According to ECMA-262, section 11.2.2, page 44, the function
  // expression in new calls must be evaluated before the
  // arguments. This is different from ordinary calls, where the
  // actual function to call is resolved after the arguments have been
  // evaluated.

  // Push constructor on the stack.  If it's not a function it's used as
  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
  // ignored.
  Load(node->expression());

  // Push the arguments ("left-to-right") on the stack.
  ZoneList<Expression*>* args = node->arguments();
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  // Spill everything from here to simplify the implementation.
  VirtualFrame::SpilledScope spilled_scope(frame_);

  // Load the argument count into r0 and the function into r1 as per
  // calling convention.
  __ mov(r0, Operand(arg_count));
  __ ldr(r1, frame_->ElementAt(arg_count));

  // Call the construct call builtin that handles allocation and
  // constructor invocation.
  CodeForSourcePosition(node->position());
  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
  frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
  frame_->EmitPush(r0);

  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
  Register scratch = VirtualFrame::scratch0();
  JumpTarget null, function, leave, non_function_constructor;

  // Load the object into register.
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register tos = frame_->PopToRegister();

  // If the object is a smi, we return null.
  __ tst(tos, Operand(kSmiTagMask));
  null.Branch(eq);

  // Check that the object is a JS object but take special care of JS
  // functions to make sure they have 'Function' as their class.
  __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
  null.Branch(lt);

  // As long as JS_FUNCTION_TYPE is the last instance type and it is
  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
  // LAST_JS_OBJECT_TYPE.
  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
  __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
  function.Branch(eq);

  // Check if the constructor in the map is a function.
  __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
  __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
  non_function_constructor.Branch(ne);

  // The tos register now contains the constructor function. Grab the
  // instance class name from there.
  __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(tos,
         FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
  frame_->EmitPush(tos);
  leave.Jump();

  // Functions have class 'Function'.
  function.Bind();
  __ mov(tos, Operand(Factory::function_class_symbol()));
  frame_->EmitPush(tos);
  leave.Jump();

  // Objects with a non-function constructor have class 'Object'.
  non_function_constructor.Bind();
  __ mov(tos, Operand(Factory::Object_symbol()));
  frame_->EmitPush(tos);
  leave.Jump();

  // Non-JS objects have class null.
  null.Bind();
  __ LoadRoot(tos, Heap::kNullValueRootIndex);
  frame_->EmitPush(tos);

  // All done.
  leave.Bind();
}


void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
  Register scratch = VirtualFrame::scratch0();
  JumpTarget leave;

  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register tos = frame_->PopToRegister();  // tos contains object.
  // if (object->IsSmi()) return the object.
  __ tst(tos, Operand(kSmiTagMask));
  leave.Branch(eq);
  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
  __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
  leave.Branch(ne);
  // Load the value.
  __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
  leave.Bind();
  frame_->EmitPush(tos);
}


void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
  Register scratch1 = VirtualFrame::scratch0();
  Register scratch2 = VirtualFrame::scratch1();
  JumpTarget leave;

  ASSERT(args->length() == 2);
  Load(args->at(0));    // Load the object.
  Load(args->at(1));    // Load the value.
  Register value = frame_->PopToRegister();
  Register object = frame_->PopToRegister(value);
  // if (object->IsSmi()) return object.
  __ tst(object, Operand(kSmiTagMask));
  leave.Branch(eq);
  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
  __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
  leave.Branch(ne);
  // Store the value.
  __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
  // Update the write barrier.
  __ RecordWrite(object,
                 Operand(JSValue::kValueOffset - kHeapObjectTag),
                 scratch1,
                 scratch2);
  // Leave.
  leave.Bind();
  frame_->EmitPush(value);
}


void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register reg = frame_->PopToRegister();
  __ tst(reg, Operand(kSmiTagMask));
  cc_reg_ = eq;
}


void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
  // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
  ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (ShouldGenerateLog(args->at(0))) {
    Load(args->at(1));
    Load(args->at(2));
    frame_->CallRuntime(Runtime::kLog, 2);
  }
#endif
  frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
}


void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register reg = frame_->PopToRegister();
  __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
  cc_reg_ = eq;
}


// Generates the Math.pow method.
void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 2);
  Load(args->at(0));
  Load(args->at(1));

  if (!CpuFeatures::IsSupported(VFP3)) {
    frame_->CallRuntime(Runtime::kMath_pow, 2);
    frame_->EmitPush(r0);
  } else {
    CpuFeatures::Scope scope(VFP3);
    JumpTarget runtime, done;
    Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;

    Register scratch1 = VirtualFrame::scratch0();
    Register scratch2 = VirtualFrame::scratch1();

    // Get base and exponent to registers.
    Register exponent = frame_->PopToRegister();
    Register base = frame_->PopToRegister(exponent);
    Register heap_number_map = no_reg;

    // Set the frame for the runtime jump target. The code below jumps to the
    // jump target label so the frame needs to be established before that.
    ASSERT(runtime.entry_frame() == NULL);
    runtime.set_entry_frame(frame_);

    __ BranchOnNotSmi(exponent, &exponent_nonsmi);
    __ BranchOnNotSmi(base, &base_nonsmi);

    heap_number_map = r6;
    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

    // Exponent is a smi and base is a smi. Get the smi value into vfp register
    // d1.
    __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
    __ b(&powi);

    __ bind(&base_nonsmi);
    // Exponent is a smi and base is a non-smi. Get the double value from the
    // base into vfp register d1.
    __ ObjectToDoubleVFPRegister(base, d1,
                                 scratch1, scratch2, heap_number_map, s0,
                                 runtime.entry_label());

    __ bind(&powi);

    // Load 1.0 into d0.
    __ vmov(d0, 1.0);

    // Get the absolute untagged value of the exponent and use that for the
    // calculation.
    __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
    // Negate if negative.
    __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi);
    __ vmov(d2, d0, mi);  // 1.0 needed in d2 later if exponent is negative.

    // Run through all the bits in the exponent. The result is calculated in d0
    // and d1 holds base^(2^i) for the bit position i currently examined.
    Label more_bits;
    __ bind(&more_bits);
    __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
    __ vmul(d0, d0, d1, cs);  // Multiply by base^(2^i) if bit i was set.
    __ vmul(d1, d1, d1, ne);  // Don't bother squaring d1 again if done.
    __ b(ne, &more_bits);
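    // Worked illustration (not generated code): for |exponent| = 5 (binary
    // 101) the loop sees the bits 1, 0, 1 from least to most significant, so
    // d0 ends up as base^1 * base^4 = base^5 while d1 steps through base,
    // base^2 and base^4.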

    // If exponent is positive we are done.
    __ cmp(exponent, Operand(0, RelocInfo::NONE));
    __ b(ge, &allocate_return);

    // If the exponent is negative the result is 1/result (d2 already holds 1.0
    // in that case). However if d0 has reached infinity this will not provide
    // the correct result, so call the runtime if that is the case.
    __ mov(scratch2, Operand(0x7FF00000));
    __ mov(scratch1, Operand(0, RelocInfo::NONE));
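    // scratch2:scratch1 now hold 0x7FF00000:00000000, the bit pattern of +Inf.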
    __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
    __ vcmp(d0, d1);
    __ vmrs(pc);
    runtime.Branch(eq);  // d0 reached infinity.
    __ vdiv(d0, d2, d0);
    __ b(&allocate_return);

    __ bind(&exponent_nonsmi);
    // Special handling of raising to the power of -0.5 and 0.5. First check
    // that the value is a heap number and that the lower mantissa bits (which
    // are zero for both of these values) are indeed zero.
    heap_number_map = r6;
    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
    __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
    __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
    __ cmp(scratch1, heap_number_map);
    runtime.Branch(ne);
    __ tst(scratch2, scratch2);
    runtime.Branch(ne);

    // Load the higher bits (which contain the floating point exponent).
    __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));

    // Compare exponent with -0.5.
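    // (0xBFE00000 is the upper 32 bits of the IEEE-754 double -0.5; the lower
    // 32 bits were already checked to be zero above.)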
    __ cmp(scratch1, Operand(0xbfe00000));
    __ b(ne, &not_minus_half);

    // Get the double value from the base into vfp register d0.
    __ ObjectToDoubleVFPRegister(base, d0,
                                 scratch1, scratch2, heap_number_map, s0,
                                 runtime.entry_label(),
                                 AVOID_NANS_AND_INFINITIES);

    // Load 1.0 into d2.
    __ vmov(d2, 1.0);

    // Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x).
    __ vdiv(d0, d2, d0);
    __ vsqrt(d0, d0);

    __ b(&allocate_return);

    __ bind(&not_minus_half);
    // Compare exponent with 0.5.
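    // (0x3FE00000 is the upper 32 bits of the IEEE-754 double 0.5.)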
    __ cmp(scratch1, Operand(0x3fe00000));
    runtime.Branch(ne);

    // Get the double value from the base into vfp register d0.
    __ ObjectToDoubleVFPRegister(base, d0,
                                 scratch1, scratch2, heap_number_map, s0,
                                 runtime.entry_label(),
                                 AVOID_NANS_AND_INFINITIES);
    __ vsqrt(d0, d0);

    __ bind(&allocate_return);
    Register scratch3 = r5;
    __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
                                   heap_number_map, runtime.entry_label());
    __ mov(base, scratch3);
    done.Jump();

    runtime.Bind();

    // Push back the arguments again for the runtime call.
    frame_->EmitPush(base);
    frame_->EmitPush(exponent);
    frame_->CallRuntime(Runtime::kMath_pow, 2);
    __ Move(base, r0);

    done.Bind();
    frame_->EmitPush(base);
  }
}


// Generates the Math.sqrt method.
void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));

  if (!CpuFeatures::IsSupported(VFP3)) {
    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
    frame_->EmitPush(r0);
  } else {
    CpuFeatures::Scope scope(VFP3);
    JumpTarget runtime, done;

    Register scratch1 = VirtualFrame::scratch0();
    Register scratch2 = VirtualFrame::scratch1();

    // Get the value from the frame.
    Register tos = frame_->PopToRegister();

    // Set the frame for the runtime jump target. The code below jumps to the
    // jump target label so the frame needs to be established before that.
    ASSERT(runtime.entry_frame() == NULL);
    runtime.set_entry_frame(frame_);

    Register heap_number_map = r6;
    Register new_heap_number = r5;
    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

    // Get the double value from the heap number into vfp register d0.
    __ ObjectToDoubleVFPRegister(tos, d0,
                                 scratch1, scratch2, heap_number_map, s0,
                                 runtime.entry_label());

    // Calculate the square root of d0 and place result in a heap number object.
    __ vsqrt(d0, d0);
    __ AllocateHeapNumberWithValue(new_heap_number,
                                   d0,
                                   scratch1, scratch2,
                                   heap_number_map,
                                   runtime.entry_label());
    __ mov(tos, Operand(new_heap_number));
    done.Jump();

    runtime.Bind();
    // Push back the argument again for the runtime call.
    frame_->EmitPush(tos);
    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
    __ Move(tos, r0);

    done.Bind();
    frame_->EmitPush(tos);
  }
}


class DeferredStringCharCodeAt : public DeferredCode {
 public:
  DeferredStringCharCodeAt(Register object,
                           Register index,
                           Register scratch,
                           Register result)
      : result_(result),
        char_code_at_generator_(object,
                                index,
                                scratch,
                                result,
                                &need_conversion_,
                                &need_conversion_,
                                &index_out_of_range_,
                                STRING_INDEX_IS_NUMBER) {}

  StringCharCodeAtGenerator* fast_case_generator() {
    return &char_code_at_generator_;
  }

  virtual void Generate() {
    VirtualFrameRuntimeCallHelper call_helper(frame_state());
    char_code_at_generator_.GenerateSlow(masm(), call_helper);

    __ bind(&need_conversion_);
    // Move the undefined value into the result register, which will
    // trigger conversion.
    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
    __ jmp(exit_label());

    __ bind(&index_out_of_range_);
    // When the index is out of range, the spec requires us to return
    // NaN.
    __ LoadRoot(result_, Heap::kNanValueRootIndex);
    __ jmp(exit_label());
  }

 private:
  Register result_;

  Label need_conversion_;
  Label index_out_of_range_;

  StringCharCodeAtGenerator char_code_at_generator_;
};


// This generates code that performs a String.prototype.charCodeAt() call
// or returns a smi in order to trigger conversion.
void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
  Comment(masm_, "[ GenerateStringCharCodeAt");
  ASSERT(args->length() == 2);

  Load(args->at(0));
  Load(args->at(1));

  Register index = frame_->PopToRegister();
  Register object = frame_->PopToRegister(index);

  // We need two extra registers.
  Register scratch = VirtualFrame::scratch0();
  Register result = VirtualFrame::scratch1();

  DeferredStringCharCodeAt* deferred =
      new DeferredStringCharCodeAt(object,
                                   index,
                                   scratch,
                                   result);
  deferred->fast_case_generator()->GenerateFast(masm_);
  deferred->BindExit();
  frame_->EmitPush(result);
}


class DeferredStringCharFromCode : public DeferredCode {
 public:
  DeferredStringCharFromCode(Register code,
                             Register result)
      : char_from_code_generator_(code, result) {}

  StringCharFromCodeGenerator* fast_case_generator() {
    return &char_from_code_generator_;
  }

  virtual void Generate() {
    VirtualFrameRuntimeCallHelper call_helper(frame_state());
    char_from_code_generator_.GenerateSlow(masm(), call_helper);
  }

 private:
  StringCharFromCodeGenerator char_from_code_generator_;
};


// Generates code for creating a one-char string from a char code.
void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
  Comment(masm_, "[ GenerateStringCharFromCode");
  ASSERT(args->length() == 1);

  Load(args->at(0));

  Register result = frame_->GetTOSRegister();
  Register code = frame_->PopToRegister(result);

  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
      code, result);
  deferred->fast_case_generator()->GenerateFast(masm_);
  deferred->BindExit();
  frame_->EmitPush(result);
}


class DeferredStringCharAt : public DeferredCode {
 public:
  DeferredStringCharAt(Register object,
                       Register index,
                       Register scratch1,
                       Register scratch2,
                       Register result)
      : result_(result),
        char_at_generator_(object,
                           index,
                           scratch1,
                           scratch2,
                           result,
                           &need_conversion_,
                           &need_conversion_,
                           &index_out_of_range_,
                           STRING_INDEX_IS_NUMBER) {}

  StringCharAtGenerator* fast_case_generator() {
    return &char_at_generator_;
  }

  virtual void Generate() {
    VirtualFrameRuntimeCallHelper call_helper(frame_state());
    char_at_generator_.GenerateSlow(masm(), call_helper);

    __ bind(&need_conversion_);
    // Move smi zero into the result register, which will trigger
    // conversion.
    __ mov(result_, Operand(Smi::FromInt(0)));
    __ jmp(exit_label());

    __ bind(&index_out_of_range_);
    // When the index is out of range, the spec requires us to return
    // the empty string.
    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
    __ jmp(exit_label());
  }

 private:
  Register result_;

  Label need_conversion_;
  Label index_out_of_range_;

  StringCharAtGenerator char_at_generator_;
};


// This generates code that performs a String.prototype.charAt() call
// or returns a smi in order to trigger conversion.
void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
  Comment(masm_, "[ GenerateStringCharAt");
  ASSERT(args->length() == 2);

  Load(args->at(0));
  Load(args->at(1));

  Register index = frame_->PopToRegister();
  Register object = frame_->PopToRegister(index);

  // We need three extra registers.
  Register scratch1 = VirtualFrame::scratch0();
  Register scratch2 = VirtualFrame::scratch1();
  // Use r6 without notifying the virtual frame.
  Register result = r6;

  DeferredStringCharAt* deferred =
      new DeferredStringCharAt(object,
                               index,
                               scratch1,
                               scratch2,
                               result);
  deferred->fast_case_generator()->GenerateFast(masm_);
  deferred->BindExit();
  frame_->EmitPush(result);
}


void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  JumpTarget answer;
  // We need the CC bits to come out as not_equal in the case where the
  // object is a smi.  This can't be done with the usual test opcode so
  // we use XOR to get the right CC bits.
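  // (kSmiTag is 0, so after the and/eor pair below the flags are not_equal
  // exactly when the value is a smi, which is the "not an array" result we
  // want at the answer label.)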
  Register possible_array = frame_->PopToRegister();
  Register scratch = VirtualFrame::scratch0();
  __ and_(scratch, possible_array, Operand(kSmiTagMask));
  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
  answer.Branch(ne);
  // It is a heap object - get the map. Check if the object is a JS array.
  __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
  answer.Bind();
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  JumpTarget answer;
  // We need the CC bits to come out as not_equal in the case where the
  // object is a smi.  This can't be done with the usual test opcode so
  // we use XOR to get the right CC bits.
  Register possible_regexp = frame_->PopToRegister();
  Register scratch = VirtualFrame::scratch0();
  __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
  answer.Branch(ne);
  // It is a heap object - get the map. Check if the object is a regexp.
  __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
  answer.Bind();
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
  // This generates a fast version of:
  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register possible_object = frame_->PopToRegister();
  __ tst(possible_object, Operand(kSmiTagMask));
  false_target()->Branch(eq);

  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(possible_object, ip);
  true_target()->Branch(eq);

  Register map_reg = VirtualFrame::scratch0();
  __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined when tested with typeof.
  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
  __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
  false_target()->Branch(ne);

  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
  __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
  false_target()->Branch(lt);
  __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
  cc_reg_ = le;
}


void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
  // This generates a fast version of:
  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
  // typeof(arg) == function).
  // It includes undetectable objects (as opposed to IsObject).
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register value = frame_->PopToRegister();
  __ tst(value, Operand(kSmiTagMask));
  false_target()->Branch(eq);
  // Check that this is an object.
  __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
  __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
  __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
  cc_reg_ = ge;
}


// Deferred code to check whether the String JavaScript object is safe to use
// the default valueOf for. This code is called after the bit caching this
// information in the map has been checked with the map for the object in the
// map_result_ register. On return the register map_result_ contains 1 for
// true and 0 for false.
class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
 public:
  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
                                               Register map_result,
                                               Register scratch1,
                                               Register scratch2)
      : object_(object),
        map_result_(map_result),
        scratch1_(scratch1),
        scratch2_(scratch2) { }

  virtual void Generate() {
    Label false_result;

    // Check that map is loaded as expected.
    if (FLAG_debug_code) {
      __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset));
      __ cmp(map_result_, ip);
      __ Assert(eq, "Map not in expected register");
    }

    // Check for fast case object. Generate false result for slow case object.
    __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset));
    __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
    __ cmp(scratch1_, ip);
    __ b(eq, &false_result);

    // Look for valueOf symbol in the descriptor array, and indicate false if
    // found. The type is not checked, so if it is a transition it is a false
    // negative.
    __ ldr(map_result_,
           FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
    __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
    // map_result_: descriptor array
    // scratch2_: length of descriptor array
    // Calculate the end of the descriptor array.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize == 1);
    STATIC_ASSERT(kPointerSize == 4);
    __ add(scratch1_,
           map_result_,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    __ add(scratch1_,
           scratch1_,
           Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));

    // Calculate location of the first key name.
    __ add(map_result_,
           map_result_,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag +
                   DescriptorArray::kFirstIndex * kPointerSize));
    // Loop through all the keys in the descriptor array. If one of these is the
    // symbol valueOf the result is false.
    Label entry, loop;
    // The use of ip to store the valueOf symbol assumes that it is not
    // otherwise used in the loop below.
    __ mov(ip, Operand(Factory::value_of_symbol()));
    __ jmp(&entry);
    __ bind(&loop);
    __ ldr(scratch2_, MemOperand(map_result_, 0));
    __ cmp(scratch2_, ip);
    __ b(eq, &false_result);
    __ add(map_result_, map_result_, Operand(kPointerSize));
    __ bind(&entry);
    __ cmp(map_result_, Operand(scratch1_));
    __ b(ne, &loop);

    // Reload map as register map_result_ was used as temporary above.
    __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));

    // If a valueOf property is not found on the object check that its
    // prototype is the un-modified String prototype. If not, the result is
    // false.
    __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
    __ tst(scratch1_, Operand(kSmiTagMask));
    __ b(eq, &false_result);
    __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
    __ ldr(scratch2_,
           CodeGenerator::ContextOperand(cp, Context::GLOBAL_INDEX));
    __ ldr(scratch2_,
           FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
    __ ldr(scratch2_,
           CodeGenerator::ContextOperand(
               scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
    __ cmp(scratch1_, scratch2_);
    __ b(ne, &false_result);

    // Set the bit in the map to indicate that it has been checked safe for
    // default valueOf and set true result.
    __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
    __ orr(scratch1_,
           scratch1_,
           Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
    __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
    __ mov(map_result_, Operand(1));
    __ jmp(exit_label());
    __ bind(&false_result);
    // Set false result.
    __ mov(map_result_, Operand(0, RelocInfo::NONE));
  }

 private:
  Register object_;
  Register map_result_;
  Register scratch1_;
  Register scratch2_;
};


void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
    ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register obj = frame_->PopToRegister();  // Pop the string wrapper.
  if (FLAG_debug_code) {
    __ AbortIfSmi(obj);
  }

  // Check whether this map has already been checked to be safe for default
  // valueOf.
  Register map_result = VirtualFrame::scratch0();
  __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset));
  __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset));
  __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
  true_target()->Branch(ne);

  // We need an additional two scratch registers for the deferred code.
  Register scratch1 = VirtualFrame::scratch1();
  // Use r6 without notifying the virtual frame.
  Register scratch2 = r6;

  DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
      new DeferredIsStringWrapperSafeForDefaultValueOf(
          obj, map_result, scratch1, scratch2);
  deferred->Branch(eq);
  deferred->BindExit();
  __ tst(map_result, Operand(map_result));
  cc_reg_ = ne;
}


void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
  // This generates a fast version of:
  // (%_ClassOf(arg) === 'Function')
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register possible_function = frame_->PopToRegister();
  __ tst(possible_function, Operand(kSmiTagMask));
  false_target()->Branch(eq);
  Register map_reg = VirtualFrame::scratch0();
  Register scratch = VirtualFrame::scratch1();
  __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register possible_undetectable = frame_->PopToRegister();
  __ tst(possible_undetectable, Operand(kSmiTagMask));
  false_target()->Branch(eq);
  Register scratch = VirtualFrame::scratch0();
  __ ldr(scratch,
         FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
  cc_reg_ = ne;
}


void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 0);

  Register scratch0 = VirtualFrame::scratch0();
  Register scratch1 = VirtualFrame::scratch1();
  // Get the frame pointer for the calling frame.
  __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  __ ldr(scratch1,
         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(scratch0,
         MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
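  // (The ldr above is conditional on eq, so scratch0 only advances past the
  // adaptor frame when an adaptor frame was actually found.)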

  // Check the marker in the calling frame.
  __ ldr(scratch1,
         MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
  cc_reg_ = eq;
}


void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 0);

  Register tos = frame_->GetTOSRegister();
  Register scratch0 = VirtualFrame::scratch0();
  Register scratch1 = VirtualFrame::scratch1();

  // Check if the calling frame is an arguments adaptor frame.
  __ ldr(scratch0,
         MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(scratch1,
         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Get the number of formal parameters.
  __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);

  // Arguments adaptor case: Read the arguments length from the
  // adaptor frame.
  __ ldr(tos,
         MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
         eq);
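  // (Exactly one of the two loads above takes effect: the ne-conditional mov
  // when there is no adaptor frame, the eq-conditional ldr when there is one.)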

  frame_->EmitPush(tos);
}


void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);

  // Satisfy contract with ArgumentsAccessStub:
  // Load the key into r1 and the formal parameters count into r0.
  Load(args->at(0));
  frame_->PopToR1();
  frame_->SpillAll();
  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));

  // Call the shared stub to get to arguments[key].
  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
  frame_->CallStub(&stub, 0);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRandomHeapNumber(
    ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  ASSERT(args->length() == 0);

  Label slow_allocate_heapnumber;
  Label heapnumber_allocated;

  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
  __ jmp(&heapnumber_allocated);

  __ bind(&slow_allocate_heapnumber);
  // Allocate a heap number.
  __ CallRuntime(Runtime::kNumberAlloc, 0);
  __ mov(r4, Operand(r0));

  __ bind(&heapnumber_allocated);

  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
  // by computing:
  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
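  // With high word 0x41300000 the double equals 2^20 + (random bits)/2^32, so
  // subtracting 1.0 x 2^20 leaves a uniformly distributed value in [0, 1).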
  if (CpuFeatures::IsSupported(VFP3)) {
    __ PrepareCallCFunction(0, r1);
    __ CallCFunction(ExternalReference::random_uint32_function(), 0);

    CpuFeatures::Scope scope(VFP3);
    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
    // Create this constant using mov/orr to avoid PC relative load.
    __ mov(r1, Operand(0x41000000));
    __ orr(r1, r1, Operand(0x300000));
    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
    __ vmov(d7, r0, r1);
    // Move 0x4130000000000000 to VFP.
    __ mov(r0, Operand(0, RelocInfo::NONE));
    __ vmov(d8, r0, r1);
    // Subtract and store the result in the heap number.
    __ vsub(d7, d7, d8);
    __ sub(r0, r4, Operand(kHeapObjectTag));
    __ vstr(d7, r0, HeapNumber::kValueOffset);
    frame_->EmitPush(r4);
  } else {
    __ mov(r0, Operand(r4));
    __ PrepareCallCFunction(1, r1);
    __ CallCFunction(
        ExternalReference::fill_heap_number_with_random_function(), 1);
    frame_->EmitPush(r0);
  }
}


void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  Load(args->at(0));
  Load(args->at(1));

  StringAddStub stub(NO_STRING_ADD_FLAGS);
  frame_->SpillAll();
  frame_->CallStub(&stub, 2);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
  ASSERT_EQ(3, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));

  SubStringStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 3);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  Load(args->at(0));
  Load(args->at(1));

  StringCompareStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 2);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
  ASSERT_EQ(4, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));
  Load(args->at(3));
  RegExpExecStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 4);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
  // No stub. This code only occurs a few times in regexp.js.
  const int kMaxInlineLength = 100;
  ASSERT_EQ(3, args->length());
  Load(args->at(0));  // Size of array, smi.
  Load(args->at(1));  // "index" property value.
  Load(args->at(2));  // "input" property value.
  {
    VirtualFrame::SpilledScope spilled_scope(frame_);
    Label slowcase;
    Label done;
    __ ldr(r1, MemOperand(sp, kPointerSize * 2));
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize == 1);
    __ tst(r1, Operand(kSmiTagMask));
    __ b(ne, &slowcase);
    __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
    __ b(hi, &slowcase);
    // Smi-tagging is equivalent to multiplying by 2.
    // Allocate RegExpResult followed by FixedArray with size in r2.
    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
    // Elements:  [Map][Length][..elements..]
    // Size of JSArray with two in-object properties and the header of a
    // FixedArray.
    int objects_size =
        (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
    __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
    __ add(r2, r5, Operand(objects_size));
    __ AllocateInNewSpace(
        r2,  // In: Size, in words.
        r0,  // Out: Start of allocation (tagged).
        r3,  // Scratch register.
        r4,  // Scratch register.
        &slowcase,
        static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
    // r0: Start of allocated area, object-tagged.
    // r1: Number of elements in array, as smi.
    // r5: Number of elements, untagged.

    // Set JSArray map to global.regexp_result_map().
    // Set empty properties FixedArray.
    // Set elements to point to FixedArray allocated right after the JSArray.
    // Interleave operations for better latency.
    __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
    __ add(r3, r0, Operand(JSRegExpResult::kSize));
    __ mov(r4, Operand(Factory::empty_fixed_array()));
    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
    __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
    __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
    __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
    __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

    // Set input, index and length fields from arguments.
    __ ldm(ia_w, sp, static_cast<RegList>(r2.bit() | r4.bit()));
    __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
    __ add(sp, sp, Operand(kPointerSize));
    __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
    __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset));

    // Fill out the elements FixedArray.
    // r0: JSArray, tagged.
    // r3: FixedArray, tagged.
    // r5: Number of elements in array, untagged.

    // Set map.
    __ mov(r2, Operand(Factory::fixed_array_map()));
    __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
    // Set FixedArray length.
    __ mov(r6, Operand(r5, LSL, kSmiTagSize));
    __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
    // Fill contents of fixed-array with the-hole.
    __ mov(r2, Operand(Factory::the_hole_value()));
    __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    // Fill fixed array elements with hole.
    // r0: JSArray, tagged.
    // r2: the hole.
    // r3: Start of elements in FixedArray.
    // r5: Number of elements to fill.
    Label loop;
    __ tst(r5, Operand(r5));
    __ bind(&loop);
    __ b(le, &done);  // Jump if r5 is negative or zero.
    __ sub(r5, r5, Operand(1), SetCC);
    __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
    __ jmp(&loop);

    __ bind(&slowcase);
    __ CallRuntime(Runtime::kRegExpConstructResult, 3);

    __ bind(&done);
  }
  frame_->Forget(3);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
  ASSERT_EQ(1, args->length());

  Load(args->at(0));
  frame_->PopToR0();
  {
    VirtualFrame::SpilledScope spilled_scope(frame_);

    Label done;
    Label call_runtime;
    __ BranchOnSmi(r0, &done);

    // Load JSRegExp map into r1. Check that argument object has this map.
    // Arguments to this function should be results of calling RegExp exec,
    // which is either an unmodified JSRegExpResult or null. Anything not having
    // the unmodified JSRegExpResult map is returned unmodified.
    // This also ensures that elements are fast.

    __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
    __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
    __ ldr(r1, ContextOperand(r1, Context::REGEXP_RESULT_MAP_INDEX));
    __ ldr(ip, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ cmp(r1, Operand(ip));
    __ b(ne, &done);

    if (FLAG_debug_code) {
      __ LoadRoot(r2, Heap::kEmptyFixedArrayRootIndex);
      __ ldr(ip, FieldMemOperand(r0, JSObject::kPropertiesOffset));
      __ cmp(ip, r2);
      __ Check(eq, "JSRegExpResult: default map but non-empty properties.");
    }

    // All set, copy the contents to a new object.
    __ AllocateInNewSpace(JSRegExpResult::kSize,
                          r2,
                          r3,
                          r4,
                          &call_runtime,
                          NO_ALLOCATION_FLAGS);
    // Store RegExpResult map as map of allocated object.
    ASSERT(JSRegExpResult::kSize == 6 * kPointerSize);
    // Copy all fields (map is already in r1) from (untagged) r0 to r2.
    // Change map of elements array (ends up in r4) to be a FixedCOWArray.
    __ bic(r0, r0, Operand(kHeapObjectTagMask));
    __ ldm(ib, r0, r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit());
    __ stm(ia, r2,
           r1.bit() | r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit());
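    // (ldm with 'ib' skips the map word of the source object; stm with 'ia'
    // then writes the map from r1 followed by the five copied fields.)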
    ASSERT(JSRegExp::kElementsOffset == 2 * kPointerSize);
    // Check whether elements array is empty fixed array, and otherwise make
    // it copy-on-write (it never should be empty unless someone is messing
    // with the arguments to the runtime function).
    __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
    __ add(r0, r2, Operand(kHeapObjectTag));  // Tag result and move it to r0.
    __ cmp(r4, ip);
    __ b(eq, &done);
    __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
    __ str(ip, FieldMemOperand(r4, HeapObject::kMapOffset));
    __ b(&done);
    __ bind(&call_runtime);
    __ push(r0);
    __ CallRuntime(Runtime::kRegExpCloneResult, 1);
    __ bind(&done);
  }
  frame_->EmitPush(r0);
}


class DeferredSearchCache: public DeferredCode {
 public:
  DeferredSearchCache(Register dst, Register cache, Register key)
      : dst_(dst), cache_(cache), key_(key) {
    set_comment("[ DeferredSearchCache");
  }

  virtual void Generate();

 private:
  Register dst_, cache_, key_;
};


void DeferredSearchCache::Generate() {
  __ Push(cache_, key_);
  __ CallRuntime(Runtime::kGetFromCache, 2);
  __ Move(dst_, r0);
}


void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  ASSERT_NE(NULL, args->at(0)->AsLiteral());
  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();

  Handle<FixedArray> jsfunction_result_caches(
      Top::global_context()->jsfunction_result_caches());
  if (jsfunction_result_caches->length() <= cache_id) {
    __ Abort("Attempt to use undefined cache.");
    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
    return;
  }

  Load(args->at(1));

  frame_->PopToR1();
  frame_->SpillAll();
  Register key = r1;  // Just popped to r1.
  Register result = r0;  // Free, as frame has just been spilled.
  Register scratch1 = VirtualFrame::scratch0();
  Register scratch2 = VirtualFrame::scratch1();

  __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(scratch1,
         FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
  __ ldr(scratch1,
         ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
  __ ldr(scratch1,
         FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));

  DeferredSearchCache* deferred =
      new DeferredSearchCache(result, scratch1, key);

  const int kFingerOffset =
      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
  // result now holds finger offset as a smi.
  __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // scratch2 now points to the start of fixed array elements.
  __ ldr(result,
         MemOperand(
             scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
  // Note side effect of PreIndex: scratch2 now points to the key of the pair.
  __ cmp(key, result);
  deferred->Branch(ne);

  __ ldr(result, MemOperand(scratch2, kPointerSize));
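  // The cache stores key/value pairs in consecutive words, so the value sits
  // one word after the key that the finger points at.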

  deferred->BindExit();
  frame_->EmitPush(result);
}


void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);

  // Load the argument on the stack and jump to the runtime.
  Load(args->at(0));

  NumberToStringStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 1);
  frame_->EmitPush(r0);
}


class DeferredSwapElements: public DeferredCode {
 public:
  DeferredSwapElements(Register object, Register index1, Register index2)
      : object_(object), index1_(index1), index2_(index2) {
    set_comment("[ DeferredSwapElements");
  }

  virtual void Generate();

 private:
  Register object_, index1_, index2_;
};


void DeferredSwapElements::Generate() {
  __ push(object_);
  __ push(index1_);
  __ push(index2_);
  __ CallRuntime(Runtime::kSwapElements, 3);
}


void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
  Comment cmnt(masm_, "[ GenerateSwapElements");

  ASSERT_EQ(3, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));

  VirtualFrame::SpilledScope spilled_scope(frame_);

  Register index2 = r2;
  Register index1 = r1;
  Register object = r0;
  Register tmp1 = r3;
  Register tmp2 = r4;

  frame_->EmitPop(index2);
  frame_->EmitPop(index1);
  frame_->EmitPop(object);

  DeferredSwapElements* deferred =
      new DeferredSwapElements(object, index1, index2);

  // Fetch the map and check if array is in fast case.
  // Check that object doesn't require security checks and
  // has no indexed interceptor.
  __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
  deferred->Branch(lt);
  __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
  __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
  deferred->Branch(nz);

  // Check the object's elements are in fast case and writable.
  __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(tmp2, ip);
  deferred->Branch(ne);

  // Smi-tagging is equivalent to multiplying by 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);

  // Check that both indices are smis.
  __ mov(tmp2, index1);
  __ orr(tmp2, tmp2, index2);
  __ tst(tmp2, Operand(kSmiTagMask));
  deferred->Branch(nz);

  // Bring the offsets into the fixed array in tmp1 into index1 and
  // index2.
  __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
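  // (A smi is the index shifted left by one, so one further left shift turns
  // a smi index directly into a byte offset for 4-byte elements.)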

  // Swap elements.
  Register tmp3 = object;
  object = no_reg;
  __ ldr(tmp3, MemOperand(tmp1, index1));
  __ ldr(tmp2, MemOperand(tmp1, index2));
  __ str(tmp3, MemOperand(tmp1, index2));
  __ str(tmp2, MemOperand(tmp1, index1));

  Label done;
  __ InNewSpace(tmp1, tmp2, eq, &done);
  // Possible optimization: do a check that both values are Smis
  // (or them and test against Smi mask.)

  __ mov(tmp2, tmp1);
  RecordWriteStub recordWrite1(tmp1, index1, tmp3);
  __ CallStub(&recordWrite1);

  RecordWriteStub recordWrite2(tmp2, index2, tmp3);
  __ CallStub(&recordWrite2);

  __ bind(&done);

  deferred->BindExit();
  __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
  frame_->EmitPush(tmp1);
}


void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
  Comment cmnt(masm_, "[ GenerateCallFunction");

  ASSERT(args->length() >= 2);

  int n_args = args->length() - 2;  // for receiver and function.
  Load(args->at(0));  // receiver
  for (int i = 0; i < n_args; i++) {
    Load(args->at(i + 1));
  }
  Load(args->at(n_args + 1));  // function
  frame_->CallJSFunction(n_args);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);
  Load(args->at(0));
  if (CpuFeatures::IsSupported(VFP3)) {
    TranscendentalCacheStub stub(TranscendentalCache::SIN);
    frame_->SpillAllButCopyTOSToR0();
    frame_->CallStub(&stub, 1);
  } else {
    frame_->CallRuntime(Runtime::kMath_sin, 1);
  }
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);
  Load(args->at(0));
  if (CpuFeatures::IsSupported(VFP3)) {
    TranscendentalCacheStub stub(TranscendentalCache::COS);
    frame_->SpillAllButCopyTOSToR0();
    frame_->CallStub(&stub, 1);
  } else {
    frame_->CallRuntime(Runtime::kMath_cos, 1);
  }
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 2);

  // Load the two objects into registers and perform the comparison.
  Load(args->at(0));
  Load(args->at(1));
  Register lhs = frame_->PopToRegister();
  Register rhs = frame_->PopToRegister(lhs);
  __ cmp(lhs, rhs);
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 2);

  // Load the two objects into registers and perform the comparison.
  Load(args->at(0));
  Load(args->at(1));
  Register right = frame_->PopToRegister();
  Register left = frame_->PopToRegister(right);
  Register tmp = frame_->scratch0();
  Register tmp2 = frame_->scratch1();

  // Jumps to done must have the eq flag set if the test is successful
  // and clear if the test has failed.
  Label done;

  // Fail if either is a non-HeapObject.
  __ cmp(left, Operand(right));
  __ b(eq, &done);
  __ and_(tmp, left, Operand(right));
  __ eor(tmp, tmp, Operand(kSmiTagMask));
  __ tst(tmp, Operand(kSmiTagMask));
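  // (Heap objects have tag bit 1 and smis have tag bit 0, so the and/eor/tst
  // sequence above sets eq only when neither operand is a smi.)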
  __ b(ne, &done);
  __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
  __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
  __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
  __ b(ne, &done);
  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ cmp(tmp, Operand(tmp2));
  __ b(ne, &done);
  __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
  __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
  __ cmp(tmp, tmp2);
  __ bind(&done);
  cc_reg_ = eq;
}


void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register value = frame_->PopToRegister();
  Register tmp = frame_->scratch0();
  __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset));
  __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask));
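  // The mask bits in the hash field are clear only when a cached array index
  // is present, so the eq condition signals that the index exists.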
  cc_reg_ = eq;
}


void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register value = frame_->PopToRegister();

  __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset));
  __ IndexFromHash(value, value);
  frame_->EmitPush(value);
}


void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  if (CheckForInlineRuntimeCall(node)) {
    ASSERT((has_cc() && frame_->height() == original_height) ||
           (!has_cc() && frame_->height() == original_height + 1));
    return;
  }

  ZoneList<Expression*>* args = node->arguments();
  Comment cmnt(masm_, "[ CallRuntime");
  Runtime::Function* function = node->function();

  if (function == NULL) {
    // Prepare stack for calling JS runtime function.
    // Push the builtins object found in the current global object.
    Register scratch = VirtualFrame::scratch0();
    __ ldr(scratch, GlobalObject());
    Register builtins = frame_->GetTOSRegister();
    __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
    frame_->EmitPush(builtins);
  }

  // Push the arguments ("left-to-right").
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  VirtualFrame::SpilledScope spilled_scope(frame_);

  if (function == NULL) {
    // Call the JS runtime function.
    __ mov(r2, Operand(node->name()));
    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
    __ ldr(cp, frame_->Context());
    frame_->EmitPush(r0);
  } else {
    // Call the C runtime function.
    frame_->CallRuntime(function, arg_count);
    frame_->EmitPush(r0);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ UnaryOperation");

  Token::Value op = node->op();

  if (op == Token::NOT) {
    LoadCondition(node->expression(), false_target(), true_target(), true);
    // LoadCondition may (and usually does) leave a test and branch to
    // be emitted by the caller.  In that case, negate the condition.
    if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);

  } else if (op == Token::DELETE) {
    Property* property = node->expression()->AsProperty();
    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
    if (property != NULL) {
      Load(property->obj());
      Load(property->key());
      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
      frame_->EmitPush(r0);

    } else if (variable != NULL) {
      Slot* slot = variable->AsSlot();
      if (variable->is_global()) {
        LoadGlobal();
        frame_->EmitPush(Operand(variable->name()));
        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
        frame_->EmitPush(r0);

      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
        // Lookup the context holding the named variable.
        frame_->EmitPush(cp);
        frame_->EmitPush(Operand(variable->name()));
        frame_->CallRuntime(Runtime::kLookupContext, 2);
        // r0: context
        frame_->EmitPush(r0);
        frame_->EmitPush(Operand(variable->name()));
        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
        frame_->EmitPush(r0);

      } else {
        // Default: Result of deleting non-global, not dynamically
        // introduced variables is false.
        frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
      }

    } else {
      // Default: Result of deleting expressions is true.
      Load(node->expression());  // may have side-effects
      frame_->Drop();
      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
    }

  } else if (op == Token::TYPEOF) {
    // Special case for loading the typeof expression; see comment on
    // LoadTypeofExpression().
    LoadTypeofExpression(node->expression());
    frame_->CallRuntime(Runtime::kTypeof, 1);
    frame_->EmitPush(r0);  // r0 has result

  } else {
    bool can_overwrite = node->expression()->ResultOverwriteAllowed();
    UnaryOverwriteMode overwrite =
        can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;

    bool no_negative_zero = node->expression()->no_negative_zero();
    Load(node->expression());
    switch (op) {
      case Token::NOT:
      case Token::DELETE:
      case Token::TYPEOF:
        UNREACHABLE();  // handled above
        break;

      case Token::SUB: {
        frame_->PopToR0();
        GenericUnaryOpStub stub(
            Token::SUB,
            overwrite,
            NO_UNARY_FLAGS,
            no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
        frame_->CallStub(&stub, 0);
        frame_->EmitPush(r0);  // r0 has result
        break;
      }

      case Token::BIT_NOT: {
        Register tos = frame_->PopToRegister();
        JumpTarget not_smi_label;
        JumpTarget continue_label;
        // Smi check.
        __ tst(tos, Operand(kSmiTagMask));
        not_smi_label.Branch(ne);
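        // For a smi, MVN inverts every bit including the tag; clearing the
        // inverted tag bit below leaves the smi encoding of the bitwise-not.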

        __ mvn(tos, Operand(tos));
        __ bic(tos, tos, Operand(kSmiTagMask));  // Bit-clear inverted smi-tag.
        frame_->EmitPush(tos);
        // The fast case is the first to jump to the continue label, so it gets
        // to decide the virtual frame layout.
        continue_label.Jump();

        not_smi_label.Bind();
        frame_->SpillAll();
        __ Move(r0, tos);
        GenericUnaryOpStub stub(Token::BIT_NOT,
                                overwrite,
                                NO_UNARY_SMI_CODE_IN_STUB);
        frame_->CallStub(&stub, 0);
        frame_->EmitPush(r0);

        continue_label.Bind();
        break;
      }

      case Token::VOID:
        frame_->Drop();
        frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
        break;

      case Token::ADD: {
        Register tos = frame_->Peek();
        // Smi check.
        JumpTarget continue_label;
        __ tst(tos, Operand(kSmiTagMask));
        continue_label.Branch(eq);
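        // A smi is already a number, so unary plus leaves it untouched; other
        // values are converted via the TO_NUMBER builtin.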

        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
        frame_->EmitPush(r0);

        continue_label.Bind();
        break;
      }
      default:
        UNREACHABLE();
    }
  }
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::VisitCountOperation(CountOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ CountOperation");
  VirtualFrame::RegisterAllocationScope scope(this);

  bool is_postfix = node->is_postfix();
  bool is_increment = node->op() == Token::INC;

  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
  bool is_const = (var != NULL && var->mode() == Variable::CONST);
  bool is_slot = (var != NULL && var->mode() == Variable::VAR);

  if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
    // The type info declares that this variable is always a Smi.  That
    // means it is a Smi both before and after the increment/decrement.
    // Let's make use of that to make a very minimal count.
    Reference target(this, node->expression(), !is_const);
    ASSERT(!target.is_illegal());
    target.GetValue();  // Pushes the value.
    Register value = frame_->PopToRegister();
    if (is_postfix) frame_->EmitPush(value);
    if (is_increment) {
      __ add(value, value, Operand(Smi::FromInt(1)));
    } else {
      __ sub(value, value, Operand(Smi::FromInt(1)));
    }
    frame_->EmitPush(value);
    target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
    if (is_postfix) frame_->Pop();
    ASSERT_EQ(original_height + 1, frame_->height());
    return;
  }

  // If it's a postfix expression and its result is not ignored and the
  // reference is non-trivial, then push a placeholder on the stack now
  // to hold the result of the expression.
  bool placeholder_pushed = false;
  if (!is_slot && is_postfix) {
    frame_->EmitPush(Operand(Smi::FromInt(0)));
    placeholder_pushed = true;
  }

  // A constant reference is not saved to, so a constant reference is not a
  // compound assignment reference.
  { Reference target(this, node->expression(), !is_const);
    if (target.is_illegal()) {
      // Spoof the virtual frame to have the expected height (one higher
      // than on entry).
      if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
      ASSERT_EQ(original_height + 1, frame_->height());
      return;
    }

    // This pushes 0, 1 or 2 words on the stack to be used later when updating
    // the target.  It also pushes the current value of the target.
    target.GetValue();

    JumpTarget slow;
    JumpTarget exit;

    Register value = frame_->PopToRegister();

    // Postfix: Store the old value as the result.
    if (placeholder_pushed) {
      frame_->SetElementAt(value, target.size());
    } else if (is_postfix) {
      frame_->EmitPush(value);
      __ mov(VirtualFrame::scratch0(), value);
      value = VirtualFrame::scratch0();
    }

    // Check for smi operand.
    __ tst(value, Operand(kSmiTagMask));
    slow.Branch(ne);

    // Perform optimistic increment/decrement.
    if (is_increment) {
      __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
    } else {
      __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
    }
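    // Adding or subtracting the tagged constant changes the untagged value by
    // one; with SetCC the V flag is set exactly when the result leaves the
    // smi range.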

    // If the increment/decrement didn't overflow, we're done.
    exit.Branch(vc);

    // Revert optimistic increment/decrement.
    if (is_increment) {
      __ sub(value, value, Operand(Smi::FromInt(1)));
    } else {
      __ add(value, value, Operand(Smi::FromInt(1)));
    }

    // Slow case: Convert to number.  At this point the
    // value to be incremented is in the value register.
    slow.Bind();

    // Convert the operand to a number.
    frame_->EmitPush(value);

    {
      VirtualFrame::SpilledScope spilled(frame_);
      frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);

      if (is_postfix) {
        // Postfix: store to result (on the stack).
        __ str(r0, frame_->ElementAt(target.size()));
      }

      // Compute the new value.
      frame_->EmitPush(r0);
      frame_->EmitPush(Operand(Smi::FromInt(1)));
      if (is_increment) {
        frame_->CallRuntime(Runtime::kNumberAdd, 2);
      } else {
        frame_->CallRuntime(Runtime::kNumberSub, 2);
      }
    }

    __ Move(value, r0);
    // Store the new value in the target if not const.
    // At this point the answer is in the value register.
    exit.Bind();
    frame_->EmitPush(value);
    // Set the target with the result, leaving the result on
    // top of the stack.  Removes the target from the stack if
    // it has a non-zero size.
    if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
  }

  // Postfix: Discard the new value and use the old.
  if (is_postfix) frame_->Pop();
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
  // According to ECMA-262 section 11.11, page 58, the binary logical
  // operators must yield the result of one of the two expressions
  // before any ToBoolean() conversions. This means that the value
  // produced by a && or || operator is not necessarily a boolean.

  // NOTE: If the left hand side produces a materialized value (not in
  // the CC register), we force the right hand side to do the
  // same. This is necessary because we may have to branch to the exit
  // after evaluating the left hand side (due to the shortcut
  // semantics), but the compiler must (statically) know if the result
  // of compiling the binary operation is materialized or not.
  if (node->op() == Token::AND) {
    JumpTarget is_true;
    LoadCondition(node->left(), &is_true, false_target(), false);
    if (has_valid_frame() && !has_cc()) {
      // The left-hand side result is on top of the virtual frame.
      JumpTarget pop_and_continue;
      JumpTarget exit;

      frame_->Dup();
      // Avoid popping the result if it converts to 'false' using the
      // standard ToBoolean() conversion as described in ECMA-262,
      // section 9.2, page 30.
      ToBoolean(&pop_and_continue, &exit);
      Branch(false, &exit);

      // Pop the result of evaluating the first part.
      pop_and_continue.Bind();
      frame_->Pop();

      // Evaluate right side expression.
      is_true.Bind();
      Load(node->right());

      // Exit (always with a materialized value).
      exit.Bind();
    } else if (has_cc() || is_true.is_linked()) {
      // The left-hand side is either (a) partially compiled to
      // control flow with a final branch left to emit or (b) fully
      // compiled to control flow and possibly true.
      if (has_cc()) {
        Branch(false, false_target());
      }
      is_true.Bind();
      LoadCondition(node->right(), true_target(), false_target(), false);
    } else {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
    }

  } else {
    ASSERT(node->op() == Token::OR);
    JumpTarget is_false;
    LoadCondition(node->left(), true_target(), &is_false, false);
    if (has_valid_frame() && !has_cc()) {
      // The left-hand side result is on top of the virtual frame.
      JumpTarget pop_and_continue;
      JumpTarget exit;

      frame_->Dup();
      // Avoid popping the result if it converts to 'true' using the
      // standard ToBoolean() conversion as described in ECMA-262,
      // section 9.2, page 30.
      ToBoolean(&exit, &pop_and_continue);
      Branch(true, &exit);

      // Pop the result of evaluating the first part.
      pop_and_continue.Bind();
      frame_->Pop();

      // Evaluate right side expression.
      is_false.Bind();
      Load(node->right());

      // Exit (always with a materialized value).
      exit.Bind();
    } else if (has_cc() || is_false.is_linked()) {
      // The left-hand side is either (a) partially compiled to
      // control flow with a final branch left to emit or (b) fully
      // compiled to control flow and possibly false.
      if (has_cc()) {
        Branch(true, true_target());
      }
      is_false.Bind();
      LoadCondition(node->right(), true_target(), false_target(), false);
    } else {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
    }
  }
}


void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ BinaryOperation");

  if (node->op() == Token::AND || node->op() == Token::OR) {
    GenerateLogicalBooleanOperation(node);
  } else {
    // Optimize for the case where (at least) one of the expressions
    // is a literal small integer.
    Literal* lliteral = node->left()->AsLiteral();
    Literal* rliteral = node->right()->AsLiteral();
    // NOTE: The code below assumes that the slow cases (calls to runtime)
    // never return a constant/immutable object.
    bool overwrite_left = node->left()->ResultOverwriteAllowed();
    bool overwrite_right = node->right()->ResultOverwriteAllowed();

    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
      VirtualFrame::RegisterAllocationScope scope(this);
      Load(node->left());
      if (frame_->KnownSmiAt(0)) overwrite_left = false;
      SmiOperation(node->op(),
                   rliteral->handle(),
                   false,
                   overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
      VirtualFrame::RegisterAllocationScope scope(this);
      Load(node->right());
      if (frame_->KnownSmiAt(0)) overwrite_right = false;
      SmiOperation(node->op(),
                   lliteral->handle(),
                   true,
                   overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
    } else {
      GenerateInlineSmi inline_smi =
          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
      if (lliteral != NULL) {
        ASSERT(!lliteral->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      if (rliteral != NULL) {
        ASSERT(!rliteral->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      VirtualFrame::RegisterAllocationScope scope(this);
      OverwriteMode overwrite_mode = NO_OVERWRITE;
      if (overwrite_left) {
        overwrite_mode = OVERWRITE_LEFT;
      } else if (overwrite_right) {
        overwrite_mode = OVERWRITE_RIGHT;
      }
      Load(node->left());
      Load(node->right());
      GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
    }
  }
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::VisitThisFunction(ThisFunction* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  frame_->EmitPush(MemOperand(frame_->Function()));
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ CompareOperation");

  VirtualFrame::RegisterAllocationScope nonspilled_scope(this);

  // Get the expressions from the node.
  Expression* left = node->left();
  Expression* right = node->right();
  Token::Value op = node->op();

  // To make typeof testing for natives implemented in JavaScript really
  // efficient, we generate special code for expressions of the form:
  // 'typeof <expression> == <string>'.
  UnaryOperation* operation = left->AsUnaryOperation();
  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
      (operation != NULL && operation->op() == Token::TYPEOF) &&
      (right->AsLiteral() != NULL &&
       right->AsLiteral()->handle()->IsString())) {
    Handle<String> check(String::cast(*right->AsLiteral()->handle()));

    // Load the operand, move it to a register.
    LoadTypeofExpression(operation->expression());
    Register tos = frame_->PopToRegister();

    Register scratch = VirtualFrame::scratch0();

    if (check->Equals(Heap::number_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      true_target()->Branch(eq);
      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
      __ cmp(tos, ip);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::string_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));

      // It can be an undetectable string object.
      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
      false_target()->Branch(eq);

      __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
      __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
      cc_reg_ = lt;

    } else if (check->Equals(Heap::boolean_symbol())) {
      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
      __ cmp(tos, ip);
      true_target()->Branch(eq);
      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
      __ cmp(tos, ip);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::undefined_symbol())) {
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(tos, ip);
      true_target()->Branch(eq);

      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      // It can be an undetectable object.
      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));

      cc_reg_ = eq;

    } else if (check->Equals(Heap::function_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);
      Register map_reg = scratch;
      __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
      true_target()->Branch(eq);
      // Regular expressions are callable so typeof == 'function'.
      __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::object_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      __ LoadRoot(ip, Heap::kNullValueRootIndex);
      __ cmp(tos, ip);
      true_target()->Branch(eq);

      Register map_reg = scratch;
      __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
      false_target()->Branch(eq);

      // It can be an undetectable object.
      __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
      __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
      __ cmp(tos, Operand(1 << Map::kIsUndetectable));
      false_target()->Branch(eq);

      __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
      __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
      false_target()->Branch(lt);
      __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
      cc_reg_ = le;

    } else {
      // Uncommon case: typeof testing against a string literal that is
      // never returned from the typeof operator.
      false_target()->Jump();
    }
    ASSERT(!has_valid_frame() ||
           (has_cc() && frame_->height() == original_height));
    return;
  }

  switch (op) {
    case Token::EQ:
      Comparison(eq, left, right, false);
      break;

    case Token::LT:
      Comparison(lt, left, right);
      break;

    case Token::GT:
      Comparison(gt, left, right);
      break;

    case Token::LTE:
      Comparison(le, left, right);
      break;

    case Token::GTE:
      Comparison(ge, left, right);
      break;

    case Token::EQ_STRICT:
      Comparison(eq, left, right, true);
      break;

    case Token::IN: {
      Load(left);
      Load(right);
      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
      frame_->EmitPush(r0);
      break;
    }

    case Token::INSTANCEOF: {
      Load(left);
      Load(right);
      InstanceofStub stub;
      frame_->CallStub(&stub, 2);
      // At this point if instanceof succeeded then r0 == 0.
      __ tst(r0, Operand(r0));
      cc_reg_ = eq;
      break;
    }

    default:
      UNREACHABLE();
  }
  ASSERT((has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ CompareToNull");

  Load(node->expression());
  Register tos = frame_->PopToRegister();
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(tos, ip);

  // The 'null' value is only equal to 'undefined' if using non-strict
  // comparisons.
  if (!node->is_strict()) {
    true_target()->Branch(eq);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(tos, Operand(ip));
    true_target()->Branch(eq);

    __ tst(tos, Operand(kSmiTagMask));
    false_target()->Branch(eq);

    // It can be an undetectable object.
    __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
    __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
    __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
    __ cmp(tos, Operand(1 << Map::kIsUndetectable));
  }

  cc_reg_ = eq;
  ASSERT(has_cc() && frame_->height() == original_height);
}


class DeferredReferenceGetNamedValue: public DeferredCode {
 public:
  explicit DeferredReferenceGetNamedValue(Register receiver,
                                          Handle<String> name)
      : receiver_(receiver), name_(name) {
    set_comment("[ DeferredReferenceGetNamedValue");
  }

  virtual void Generate();

 private:
  Register receiver_;
  Handle<String> name_;
};


// Convention for this is that on entry the receiver is in a register that
// is not used by the stack.  On exit the answer is found in that same
// register and the stack has the same height.
void DeferredReferenceGetNamedValue::Generate() {
#ifdef DEBUG
  int expected_height = frame_state()->frame()->height();
#endif
  VirtualFrame copied_frame(*frame_state()->frame());
  copied_frame.SpillAll();

  Register scratch1 = VirtualFrame::scratch0();
  Register scratch2 = VirtualFrame::scratch1();
  ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
  __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
  __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);

  // Ensure receiver in r0 and name in r2 to match load ic calling convention.
  __ Move(r0, receiver_);
  __ mov(r2, Operand(name_));

  // The rest of the instructions in the deferred code must be together.
  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
    __ Call(ic, RelocInfo::CODE_TARGET);
    // The call must be followed by a nop(1) instruction to indicate that the
    // in-object load has been inlined.
    __ nop(PROPERTY_ACCESS_INLINED);

    // At this point the answer is in r0.  We move it to the expected register
    // if necessary.
    __ Move(receiver_, r0);

    // Now go back to the frame that we entered with.  This will not overwrite
    // the receiver register since that register was not in use when we came
    // in.  The instructions emitted by this merge are skipped over by the
    // inline load patching mechanism when looking for the branch instruction
    // that tells it where the code to patch is.
    copied_frame.MergeTo(frame_state()->frame());

    // Block the constant pool for one more instruction after leaving this
    // constant pool block scope to include the branch instruction ending the
    // deferred code.
    __ BlockConstPoolFor(1);
  }
  ASSERT_EQ(expected_height, frame_state()->frame()->height());
}


class DeferredReferenceGetKeyedValue: public DeferredCode {
 public:
  DeferredReferenceGetKeyedValue(Register key, Register receiver)
      : key_(key), receiver_(receiver) {
    set_comment("[ DeferredReferenceGetKeyedValue");
  }

  virtual void Generate();

 private:
  Register key_;
  Register receiver_;
};


// Takes key and register in r0 and r1 or vice versa.  Returns result
// in r0.
void DeferredReferenceGetKeyedValue::Generate() {
  ASSERT((key_.is(r0) && receiver_.is(r1)) ||
         (key_.is(r1) && receiver_.is(r0)));

  VirtualFrame copied_frame(*frame_state()->frame());
  copied_frame.SpillAll();

  Register scratch1 = VirtualFrame::scratch0();
  Register scratch2 = VirtualFrame::scratch1();
  __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);

  // Ensure key in r0 and receiver in r1 to match keyed load ic calling
  // convention.
  if (key_.is(r1)) {
    __ Swap(r0, r1, ip);
  }

  // The rest of the instructions in the deferred code must be together.
  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
    __ Call(ic, RelocInfo::CODE_TARGET);
    // The call must be followed by a nop instruction to indicate that the
    // keyed load has been inlined.
    __ nop(PROPERTY_ACCESS_INLINED);

    // Now go back to the frame that we entered with.  This will not overwrite
    // the receiver or key registers since they were not in use when we came
    // in.  The instructions emitted by this merge are skipped over by the
    // inline load patching mechanism when looking for the branch instruction
    // that tells it where the code to patch is.
    copied_frame.MergeTo(frame_state()->frame());

    // Block the constant pool for one more instruction after leaving this
    // constant pool block scope to include the branch instruction ending the
    // deferred code.
    __ BlockConstPoolFor(1);
  }
}


class DeferredReferenceSetKeyedValue: public DeferredCode {
 public:
  DeferredReferenceSetKeyedValue(Register value,
                                 Register key,
                                 Register receiver)
      : value_(value), key_(key), receiver_(receiver) {
    set_comment("[ DeferredReferenceSetKeyedValue");
  }

  virtual void Generate();

 private:
  Register value_;
  Register key_;
  Register receiver_;
};


void DeferredReferenceSetKeyedValue::Generate() {
  Register scratch1 = VirtualFrame::scratch0();
  Register scratch2 = VirtualFrame::scratch1();
  __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
  __ IncrementCounter(
      &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);

  // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
  // calling convention.
  if (value_.is(r1)) {
    __ Swap(r0, r1, ip);
  }
  ASSERT(receiver_.is(r2));

  // The rest of the instructions in the deferred code must be together.
  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    // Call keyed store IC. It has the arguments value, key and receiver in r0,
    // r1 and r2.
    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
    __ Call(ic, RelocInfo::CODE_TARGET);
    // The call must be followed by a nop instruction to indicate that the
    // keyed store has been inlined.
    __ nop(PROPERTY_ACCESS_INLINED);

    // Block the constant pool for one more instruction after leaving this
    // constant pool block scope to include the branch instruction ending the
    // deferred code.
    __ BlockConstPoolFor(1);
  }
}


class DeferredReferenceSetNamedValue: public DeferredCode {
 public:
  DeferredReferenceSetNamedValue(Register value,
                                 Register receiver,
                                 Handle<String> name)
      : value_(value), receiver_(receiver), name_(name) {
    set_comment("[ DeferredReferenceSetNamedValue");
  }

  virtual void Generate();

 private:
  Register value_;
  Register receiver_;
  Handle<String> name_;
};


// Takes value in r0, receiver in r1 and returns the result (the
// value) in r0.
void DeferredReferenceSetNamedValue::Generate() {
  // Record the entry frame and spill.
  VirtualFrame copied_frame(*frame_state()->frame());
  copied_frame.SpillAll();

  // Ensure value in r0, receiver in r1 to match store ic calling
  // convention.
  ASSERT(value_.is(r0) && receiver_.is(r1));
  __ mov(r2, Operand(name_));

  // The rest of the instructions in the deferred code must be together.
  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    // Call the store IC. It has the arguments value and receiver in r0 and
    // r1, and the property name in r2.
    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
    __ Call(ic, RelocInfo::CODE_TARGET);
    // The call must be followed by a nop instruction to indicate that the
    // named store has been inlined.
    __ nop(PROPERTY_ACCESS_INLINED);

    // Go back to the frame we entered with. The instructions
    // generated by this merge are skipped over by the inline store
    // patching mechanism when looking for the branch instruction that
    // tells it where the code to patch is.
    copied_frame.MergeTo(frame_state()->frame());

    // Block the constant pool for one more instruction after leaving this
    // constant pool block scope to include the branch instruction ending the
    // deferred code.
    __ BlockConstPoolFor(1);
  }
}


// Consumes the top of stack (the receiver) and pushes the result instead.
void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
    Comment cmnt(masm(), "[ Load from named Property");
    // Setup the name register and call load IC.
    frame_->CallLoadIC(name,
                       is_contextual
                           ? RelocInfo::CODE_TARGET_CONTEXT
                           : RelocInfo::CODE_TARGET);
    frame_->EmitPush(r0);  // Push answer.
  } else {
    // Inline the in-object property case.
    Comment cmnt(masm(), "[ Inlined named property load");

    // Counter will be decremented in the deferred code. Placed here to avoid
    // having it in the instruction stream below where patching will occur.
    __ IncrementCounter(&Counters::named_load_inline, 1,
                        frame_->scratch0(), frame_->scratch1());

    // The following instructions are the inlined load of an in-object property.
    // Parts of this code are patched, so the exact instructions generated
    // need to be fixed. Therefore the constant pool is blocked while
    // generating this code.

    // Load the receiver from the stack.
    Register receiver = frame_->PopToRegister();

    DeferredReferenceGetNamedValue* deferred =
        new DeferredReferenceGetNamedValue(receiver, name);

#ifdef DEBUG
    int kInlinedNamedLoadInstructions = 7;
    Label check_inlined_codesize;
    masm_->bind(&check_inlined_codesize);
#endif

    { Assembler::BlockConstPoolScope block_const_pool(masm_);
      // Check that the receiver is a heap object.
      __ tst(receiver, Operand(kSmiTagMask));
      deferred->Branch(eq);

      Register scratch = VirtualFrame::scratch0();
      Register scratch2 = VirtualFrame::scratch1();

      // Check the map. The null map used below is patched by the inline cache
      // code.  Therefore we can't use a LoadRoot call.
      __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
      __ mov(scratch2, Operand(Factory::null_value()));
      __ cmp(scratch, scratch2);
      deferred->Branch(ne);

      // Initially use an invalid index. The index will be patched by the
      // inline cache code.
      __ ldr(receiver, MemOperand(receiver, 0));

      // Make sure that the expected number of instructions are generated.
      ASSERT_EQ(kInlinedNamedLoadInstructions,
                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
    }

    deferred->BindExit();
    // At this point the receiver register has the result, either from the
    // deferred code or from the inlined code.
    frame_->EmitPush(receiver);
  }
}


void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
#ifdef DEBUG
  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
#endif

  Result result;
  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
    frame()->CallStoreIC(name, is_contextual);
  } else {
    // Inline the in-object property case.
    JumpTarget slow, done;

    // Get the value and receiver from the stack.
    frame()->PopToR0();
    Register value = r0;
    frame()->PopToR1();
    Register receiver = r1;

    DeferredReferenceSetNamedValue* deferred =
        new DeferredReferenceSetNamedValue(value, receiver, name);

    // Check that the receiver is a heap object.
    __ tst(receiver, Operand(kSmiTagMask));
    deferred->Branch(eq);

    // The following instructions are part of the inlined
    // in-object property store code which can be patched. Therefore
    // the exact number of instructions generated must be fixed, so
    // the constant pool is blocked while generating this code.
    { Assembler::BlockConstPoolScope block_const_pool(masm_);
      Register scratch0 = VirtualFrame::scratch0();
      Register scratch1 = VirtualFrame::scratch1();

      // Check the map. Initially use an invalid map to force a
      // failure. The map check will be patched in the runtime system.
      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));

#ifdef DEBUG
      Label check_inlined_codesize;
      masm_->bind(&check_inlined_codesize);
#endif
      __ mov(scratch0, Operand(Factory::null_value()));
      __ cmp(scratch0, scratch1);
      deferred->Branch(ne);

      int offset = 0;
      __ str(value, MemOperand(receiver, offset));

      // Update the write barrier and record its size. We do not use
      // the RecordWrite macro here because we want the offset
      // addition instruction first to make it easy to patch.
      Label record_write_start, record_write_done;
      __ bind(&record_write_start);
      // Add offset into the object.
      __ add(scratch0, receiver, Operand(offset));
      // Test that the object is not in the new space.  We cannot set
      // region marks for new space pages.
      __ InNewSpace(receiver, scratch1, eq, &record_write_done);
      // Record the actual write.
      __ RecordWriteHelper(receiver, scratch0, scratch1);
      __ bind(&record_write_done);
      // Clobber all input registers when running with the debug-code flag
      // turned on to provoke errors.
      if (FLAG_debug_code) {
        __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
        __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
        __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
      }
      // Check that this is the first inlined write barrier or that
      // this inlined write barrier has the same size as all the other
      // inlined write barriers.
      ASSERT((inlined_write_barrier_size_ == -1) ||
             (inlined_write_barrier_size_ ==
              masm()->InstructionsGeneratedSince(&record_write_start)));
      inlined_write_barrier_size_ =
          masm()->InstructionsGeneratedSince(&record_write_start);

      // Make sure that the expected number of instructions are generated.
      ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
                masm()->InstructionsGeneratedSince(&check_inlined_codesize));
    }
    deferred->BindExit();
  }
  ASSERT_EQ(expected_height, frame()->height());
}


void CodeGenerator::EmitKeyedLoad() {
  if (loop_nesting() == 0) {
    Comment cmnt(masm_, "[ Load from keyed property");
    frame_->CallKeyedLoadIC();
  } else {
    // Inline the keyed load.
    Comment cmnt(masm_, "[ Inlined load from keyed property");

    // Counter will be decremented in the deferred code. Placed here to avoid
    // having it in the instruction stream below where patching will occur.
    __ IncrementCounter(&Counters::keyed_load_inline, 1,
                        frame_->scratch0(), frame_->scratch1());

    // Load the key and receiver from the stack.
    bool key_is_known_smi = frame_->KnownSmiAt(0);
    Register key = frame_->PopToRegister();
    Register receiver = frame_->PopToRegister(key);

    // The deferred code expects key and receiver in registers.
    DeferredReferenceGetKeyedValue* deferred =
        new DeferredReferenceGetKeyedValue(key, receiver);

    // Check that the receiver is a heap object.
    __ tst(receiver, Operand(kSmiTagMask));
    deferred->Branch(eq);

    // The following instructions are part of the inlined load keyed
    // property code which can be patched. Therefore the exact number of
    // instructions generated needs to be fixed, so the constant pool is
    // blocked while generating this code.
    { Assembler::BlockConstPoolScope block_const_pool(masm_);
      Register scratch1 = VirtualFrame::scratch0();
      Register scratch2 = VirtualFrame::scratch1();
      // Check the map. The null map used below is patched by the inline cache
      // code.
      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));

      // Check that the key is a smi.
      if (!key_is_known_smi) {
        __ tst(key, Operand(kSmiTagMask));
        deferred->Branch(ne);
      }

#ifdef DEBUG
      Label check_inlined_codesize;
      masm_->bind(&check_inlined_codesize);
#endif
      __ mov(scratch2, Operand(Factory::null_value()));
      __ cmp(scratch1, scratch2);
      deferred->Branch(ne);

      // Get the elements array from the receiver.
      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
      __ AssertFastElements(scratch1);

      // Check that key is within bounds. Use unsigned comparison to handle
      // negative keys.
      __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
      __ cmp(scratch2, key);
      deferred->Branch(ls);  // Unsigned less equal.

      // Load and check that the result is not the hole (key is a smi).
      __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
      __ add(scratch1,
             scratch1,
             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
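      // The smi key is already shifted left by the smi tag size, so the
      // remaining shift turns it into a byte offset into the elements array.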
      __ ldr(scratch1,
             MemOperand(scratch1, key, LSL,
                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
      __ cmp(scratch1, scratch2);
      deferred->Branch(eq);

      __ mov(r0, scratch1);
      // Make sure that the expected number of instructions are generated.
      ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
    }

    deferred->BindExit();
  }
}


void CodeGenerator::EmitKeyedStore(StaticType* key_type,
                                   WriteBarrierCharacter wb_info) {
  // Generate inlined version of the keyed store if the code is in a loop
  // and the key is likely to be a smi.
  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
    // Inline the keyed store.
    Comment cmnt(masm_, "[ Inlined store to keyed property");

    Register scratch1 = VirtualFrame::scratch0();
    Register scratch2 = VirtualFrame::scratch1();
    Register scratch3 = r3;

    // Counter will be decremented in the deferred code. Placed here to avoid
    // having it in the instruction stream below where patching will occur.
    __ IncrementCounter(&Counters::keyed_store_inline, 1,
                        scratch1, scratch2);



    // Load the value, key and receiver from the stack.
    bool value_is_harmless = frame_->KnownSmiAt(0);
    if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
    bool key_is_smi = frame_->KnownSmiAt(1);
    Register value = frame_->PopToRegister();
    Register key = frame_->PopToRegister(value);
    VirtualFrame::SpilledScope spilled(frame_);
    Register receiver = r2;
    frame_->EmitPop(receiver);

#ifdef DEBUG
    bool we_remembered_the_write_barrier = value_is_harmless;
#endif

    // The deferred code expects value, key and receiver in registers.
    DeferredReferenceSetKeyedValue* deferred =
        new DeferredReferenceSetKeyedValue(value, key, receiver);

    // Check that the value is a smi. As this inlined code does not set the
    // write barrier it is only possible to store smi values.
    if (!value_is_harmless) {
      // If the value is not likely to be a Smi then let's test the fixed array
      // for new space instead.  See below.
      if (wb_info == LIKELY_SMI) {
        __ tst(value, Operand(kSmiTagMask));
        deferred->Branch(ne);
#ifdef DEBUG
        we_remembered_the_write_barrier = true;
#endif
      }
    }

    if (!key_is_smi) {
      // Check that the key is a smi.
      __ tst(key, Operand(kSmiTagMask));
      deferred->Branch(ne);
    }

    // Check that the receiver is a heap object.
    __ tst(receiver, Operand(kSmiTagMask));
    deferred->Branch(eq);

    // Check that the receiver is a JSArray.
    __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
    deferred->Branch(ne);

    // Check that the key is within bounds. Both the key and the length of
    // the JSArray are smis. Use unsigned comparison to handle negative keys.
    __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
    __ cmp(scratch1, key);
    deferred->Branch(ls);  // Unsigned less equal.

    // Get the elements array from the receiver.
    __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
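    // A write barrier is not needed when the elements array is in new space
    // or the stored value is a smi; only non-smi stores into old-space
    // elements bail out to the deferred code.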
    if (!value_is_harmless && wb_info != LIKELY_SMI) {
      Label ok;
      __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
      __ cmp(scratch2, Operand(ExternalReference::new_space_start()));
      __ tst(value, Operand(kSmiTagMask), ne);
      deferred->Branch(ne);
#ifdef DEBUG
      we_remembered_the_write_barrier = true;
#endif
    }
    // Check that the elements array is not a dictionary.
    __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
    // The following instructions are part of the inlined store keyed
    // property code which can be patched. Therefore the exact number of
    // instructions generated needs to be fixed, so the constant pool is
    // blocked while generating this code.
    { Assembler::BlockConstPoolScope block_const_pool(masm_);
#ifdef DEBUG
      Label check_inlined_codesize;
      masm_->bind(&check_inlined_codesize);
#endif

      // Read the fixed array map from the constant pool (not from the root
      // array) so that the value can be patched.  When debugging, we patch this
      // comparison to always fail so that we will hit the IC call in the
      // deferred code which will allow the debugger to break for fast case
      // stores.
      __ mov(scratch3, Operand(Factory::fixed_array_map()));
      __ cmp(scratch2, scratch3);
      deferred->Branch(ne);

      // Store the value.
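      // As in the inlined keyed load, the smi key already carries the tag
      // shift, so the scaled index below is the byte offset into the elements
      // backing store.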
      __ add(scratch1, scratch1,
             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
      __ str(value,
             MemOperand(scratch1, key, LSL,
                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));

      // Make sure that the expected number of instructions are generated.
      ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
    }

    ASSERT(we_remembered_the_write_barrier);

    deferred->BindExit();
  } else {
    frame()->CallKeyedStoreIC();
  }
}


#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() { return true; }
#endif


#undef __
#define __ ACCESS_MASM(masm)

Handle<String> Reference::GetName() {
  ASSERT(type_ == NAMED);
  Property* property = expression_->AsProperty();
  if (property == NULL) {
    // Global variable reference treated as a named property reference.
    VariableProxy* proxy = expression_->AsVariableProxy();
    ASSERT(proxy->AsVariable() != NULL);
    ASSERT(proxy->AsVariable()->is_global());
    return proxy->name();
  } else {
    Literal* raw_name = property->key()->AsLiteral();
    ASSERT(raw_name != NULL);
    return Handle<String>(String::cast(*raw_name->handle()));
  }
}


void Reference::DupIfPersist() {
  if (persist_after_get_) {
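    // Keep the reference components on the frame (the receiver and, for
    // keyed references, also the key) so a later SetValue can reuse them.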
    switch (type_) {
      case KEYED:
        cgen_->frame()->Dup2();
        break;
      case NAMED:
        cgen_->frame()->Dup();
        // Fall through.
      case UNLOADED:
      case ILLEGAL:
      case SLOT:
        // Do nothing.
        ;
    }
  } else {
    set_unloaded();
  }
}


void Reference::GetValue() {
  ASSERT(cgen_->HasValidEntryRegisters());
  ASSERT(!is_illegal());
  ASSERT(!cgen_->has_cc());
  MacroAssembler* masm = cgen_->masm();
  Property* property = expression_->AsProperty();
  if (property != NULL) {
    cgen_->CodeForSourcePosition(property->position());
  }

  switch (type_) {
    case SLOT: {
      Comment cmnt(masm, "[ Load from Slot");
      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
      ASSERT(slot != NULL);
      DupIfPersist();
      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
      break;
    }

    case NAMED: {
      Variable* var = expression_->AsVariableProxy()->AsVariable();
      bool is_global = var != NULL;
      ASSERT(!is_global || var->is_global());
      Handle<String> name = GetName();
      DupIfPersist();
      cgen_->EmitNamedLoad(name, is_global);
      break;
    }

    case KEYED: {
      ASSERT(property != NULL);
      DupIfPersist();
      cgen_->EmitKeyedLoad();
      cgen_->frame()->EmitPush(r0);
      break;
    }

    default:
      UNREACHABLE();
  }
}


void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
  ASSERT(!is_illegal());
  ASSERT(!cgen_->has_cc());
  MacroAssembler* masm = cgen_->masm();
  VirtualFrame* frame = cgen_->frame();
  Property* property = expression_->AsProperty();
  if (property != NULL) {
    cgen_->CodeForSourcePosition(property->position());
  }

  switch (type_) {
    case SLOT: {
      Comment cmnt(masm, "[ Store to Slot");
      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
      cgen_->StoreToSlot(slot, init_state);
      set_unloaded();
      break;
    }

    case NAMED: {
      Comment cmnt(masm, "[ Store to named Property");
      cgen_->EmitNamedStore(GetName(), false);
      frame->EmitPush(r0);
      set_unloaded();
      break;
    }

    case KEYED: {
      Comment cmnt(masm, "[ Store to keyed Property");
      Property* property = expression_->AsProperty();
      ASSERT(property != NULL);
      cgen_->CodeForSourcePosition(property->position());
      cgen_->EmitKeyedStore(property->key()->type(), wb_info);
      frame->EmitPush(r0);
      set_unloaded();
      break;
    }

    default:
      UNREACHABLE();
  }
}


const char* GenericBinaryOpStub::GetName() {
  if (name_ != NULL) return name_;
  const int len = 100;
  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
  if (name_ == NULL) return "OOM";
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }

  OS::SNPrintF(Vector<char>(name_, len),
               "GenericBinaryOpStub_%s_%s%s_%s",
               op_name,
               overwrite_name,
               specialized_on_rhs_ ? "_ConstantRhs" : "",
               BinaryOpIC::GetName(runtime_operands_type_));
  return name_;
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM