Commit a6973777 authored by sgjesse@chromium.org

Reverting 2768.

Review URL: http://codereview.chromium.org/173561

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2769 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1c46ee8c
@@ -133,7 +133,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// problem here, because it is always greater than the maximum
// instance size that can be represented in a byte.
ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize);
__ AllocateObjectInNewSpace(edi, ebx, edi, no_reg, &rt_call, false);
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
__ mov(ebx, Operand::StaticVariable(new_space_allocation_top));
__ add(edi, Operand(ebx)); // Calculate new top
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
__ cmp(edi, Operand::StaticVariable(new_space_allocation_limit));
__ j(above_equal, &rt_call);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
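For orientation: the sequence above, reverted back to from the AllocateObjectInNewSpace call, is a plain bump-pointer allocation against the new-space top and limit, with the store of the new top deferred until the object's map has been written (see the next hunk). A minimal C++ sketch of the same check, using made-up names for the cells the external references point at:

#include <stddef.h>

// new_space_top / new_space_limit are illustrative stand-ins for the cells
// behind new_space_allocation_top_address() / _limit_address().
static char* new_space_top;
static char* new_space_limit;

// Returns the untagged start of the new object, or NULL when the caller must
// fall back to the runtime (the &rt_call path above).
static char* TryBumpAllocate(size_t object_size) {
  char* result = new_space_top;
  char* new_top = result + object_size;          // add(edi, Operand(ebx))
  if (new_top >= new_space_limit) return NULL;   // j(above_equal, &rt_call)
  new_space_top = new_top;  // the stub defers this store until the map is set
  return result;
}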
@@ -166,6 +173,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// ebx: JSObject
// edi: start of next object
__ or_(Operand(ebx), Immediate(kHeapObjectTag));
__ mov(Operand::StaticVariable(new_space_allocation_top), edi);
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -190,14 +198,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// edx: number of elements in properties array
ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize));
__ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
times_pointer_size,
edx,
edi,
ecx,
no_reg,
&undo_allocation,
true);
__ lea(ecx, Operand(edi, edx, times_pointer_size, FixedArray::kHeaderSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
__ j(above_equal, &undo_allocation);
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
// Initialize the FixedArray.
// ebx: JSObject
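The lea above folds the whole size computation of the properties array into one address calculation: end = start + FixedArray::kHeaderSize + element_count * kPointerSize, which is then checked against the allocation limit. A small sketch with assumed 32-bit constants (the real values come from V8's objects.h):

// Assumed ia32 values, for illustration only.
const int kPointerSizeGuess = 4;       // 32-bit pointer
const int kFixedArrayHeaderGuess = 8;  // map word + length word

// Mirrors: lea(ecx, Operand(edi, edx, times_pointer_size, FixedArray::kHeaderSize))
static inline char* FixedArrayEnd(char* start, int element_count) {
  return start + kFixedArrayHeaderGuess + element_count * kPointerSizeGuess;
}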
@@ -241,7 +245,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// allocated objects unused properties.
// ebx: JSObject (previous new top)
__ bind(&undo_allocation);
__ UndoAllocationInNewSpace(ebx);
__ xor_(Operand(ebx), Immediate(kHeapObjectTag)); // clear the heap tag
__ mov(Operand::StaticVariable(new_space_allocation_top), ebx);
}
// Allocate the new receiver object using the runtime call.
......
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_CODEGEN_IA32_INL_H_
#define V8_IA32_CODEGEN_IA32_INL_H_
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
GenerateFastMathOp(SIN, args);
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
GenerateFastMathOp(COS, args);
}
#undef __
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_INL_H_
@@ -6947,18 +6947,21 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register result) {
// Allocate heap number in new space.
__ AllocateObjectInNewSpace(HeapNumber::kSize,
result,
scratch1,
scratch2,
need_gc,
false);
// Set the map and tag the result.
ExternalReference allocation_top =
ExternalReference::new_space_allocation_top_address();
ExternalReference allocation_limit =
ExternalReference::new_space_allocation_limit_address();
__ mov(Operand(scratch1), Immediate(allocation_top));
__ mov(result, Operand(scratch1, 0));
__ lea(scratch2, Operand(result, HeapNumber::kSize)); // scratch2: new top
__ cmp(scratch2, Operand::StaticVariable(allocation_limit));
__ j(above, need_gc, not_taken);
__ mov(Operand(scratch1, 0), scratch2); // store new top
__ mov(Operand(result, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
__ or_(Operand(result), Immediate(kHeapObjectTag));
// Tag old top and use as result.
__ add(Operand(result), Immediate(kHeapObjectTag));
}
......
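One detail in the AllocateHeapNumber hunk above: the result is now tagged with add rather than or_. On an untagged, word-aligned new-space pointer the low bit is zero, so the two are interchangeable. A hedged illustration (the tag value of 1 is assumed from V8's tagged-pointer scheme, not taken from this diff):

#include <stdint.h>

const intptr_t kHeapObjectTagGuess = 1;  // assumed value of kHeapObjectTag

static inline intptr_t TagHeapObject(intptr_t untagged) {
  // add(Operand(result), Immediate(kHeapObjectTag)) and
  // or_(Operand(result), Immediate(kHeapObjectTag)) agree here, because the
  // low bit of an aligned, untagged pointer is already 0.
  return untagged + kHeapObjectTagGuess;  // == (untagged | kHeapObjectTagGuess)
}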
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
namespace v8 {
namespace internal {
// Forward declarations
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
// A reference is a C++ stack-allocated object that keeps an ECMA
// reference on the execution stack while in scope. For variables
// the reference is empty, indicating that it isn't necessary to
// store state on the stack for keeping track of references to those.
// For properties, we keep either one (named) or two (indexed) values
// on the execution stack to represent the reference.
class Reference BASE_EMBEDDED {
public:
// The values of the types are important; see size().
enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen, Expression* expression);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
ASSERT(type_ == ILLEGAL);
type_ = value;
}
// The size the reference takes up on the stack.
int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
void GetValue(TypeofState typeof_state);
// Like GetValue except that the slot is expected to be written to before
// being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue(TypeofState typeof_state);
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The stored value is left in place (with the
// reference intact below it) to support chained assignments.
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
};
// -------------------------------------------------------------------------
// Control destinations.
// A control destination encapsulates a pair of jump targets and a
// flag indicating which one is the preferred fall-through. The
// preferred fall-through must be unbound, the other may be already
// bound (ie, a backward target).
//
// The true and false targets may be jumped to unconditionally or
// control may split conditionally. Unconditional jumping and
// splitting should be emitted in tail position (as the last thing
// when compiling an expression) because they can cause either label
// to be bound or the non-fall through to be jumped to leaving an
// invalid virtual frame.
//
// The labels in the control destination can be extracted and
// manipulated normally without affecting the state of the
// destination.
class ControlDestination BASE_EMBEDDED {
public:
ControlDestination(JumpTarget* true_target,
JumpTarget* false_target,
bool true_is_fall_through)
: true_target_(true_target),
false_target_(false_target),
true_is_fall_through_(true_is_fall_through),
is_used_(false) {
ASSERT(true_is_fall_through ? !true_target->is_bound()
: !false_target->is_bound());
}
// Accessors for the jump targets. Directly jumping or branching to
// or binding the targets will not update the destination's state.
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
// True if the destination has been jumped to unconditionally or
// control has been split to both targets. This predicate does not
// test whether the targets have been extracted and manipulated as
// raw jump targets.
bool is_used() const { return is_used_; }
// True if the destination is used and the true target (respectively
// false target) was the fall through. If the target is backward,
// "fall through" includes jumping unconditionally to it.
bool true_was_fall_through() const {
return is_used_ && true_is_fall_through_;
}
bool false_was_fall_through() const {
return is_used_ && !true_is_fall_through_;
}
// Emit a branch to one of the true or false targets, and bind the
// other target. Because this binds the fall-through target, it
// should be emitted in tail position (as the last thing when
// compiling an expression).
void Split(Condition cc) {
ASSERT(!is_used_);
if (true_is_fall_through_) {
false_target_->Branch(NegateCondition(cc));
true_target_->Bind();
} else {
true_target_->Branch(cc);
false_target_->Bind();
}
is_used_ = true;
}
// Emit an unconditional jump in tail position, to the true target
// (if the argument is true) or the false target. The "jump" will
// actually bind the jump target if it is forward, jump to it if it
// is backward.
void Goto(bool where) {
ASSERT(!is_used_);
JumpTarget* target = where ? true_target_ : false_target_;
if (target->is_bound()) {
target->Jump();
} else {
target->Bind();
}
is_used_ = true;
true_is_fall_through_ = where;
}
// Mark this jump target as used as if Goto had been called, but
// without generating a jump or binding a label (the control effect
// should have already happened). This is used when the left
// subexpression of the short-circuit boolean operators is
// compiled.
void Use(bool where) {
ASSERT(!is_used_);
ASSERT((where ? true_target_ : false_target_)->is_bound());
is_used_ = true;
true_is_fall_through_ = where;
}
// Swap the true and false targets but keep the same actual label as
// the fall through. This is used when compiling negated
// expressions, where we want to swap the targets but preserve the
// state.
void Invert() {
JumpTarget* temp_target = true_target_;
true_target_ = false_target_;
false_target_ = temp_target;
true_is_fall_through_ = !true_is_fall_through_;
}
private:
// True and false jump targets.
JumpTarget* true_target_;
JumpTarget* false_target_;
// Before using the destination: true if the true target is the
// preferred fall through, false if the false target is. After
// using the destination: true if the true target was actually used
// as the fall through, false if the false target was.
bool true_is_fall_through_;
// True if the Split or Goto functions have been called.
bool is_used_;
};
// -------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the jump target pair). It is threaded through
// the call stack. Constructing a state implicitly pushes it on the owning
// code generator's stack of states, and destroying one implicitly pops it.
//
// The code generator state is only used for expressions, so statements have
// the initial state.
class CodeGenState BASE_EMBEDDED {
public:
// Create an initial code generator state. Destroying the initial state
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
// state. The new state may or may not be inside a typeof, and has its
// own control destination.
CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
ControlDestination* destination);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
// Accessors for the state.
TypeofState typeof_state() const { return typeof_state_; }
ControlDestination* destination() const { return destination_; }
private:
// The owning code generator.
CodeGenerator* owner_;
// A flag indicating whether we are compiling the immediate subexpression
// of a typeof expression.
TypeofState typeof_state_;
// A control destination in case the expression has a control-flow
// effect.
ControlDestination* destination_;
// The previous state of the owning code generator, restored when
// this state is destroyed.
CodeGenState* previous_;
};
// -------------------------------------------------------------------------
// Arguments allocation mode
enum ArgumentsAllocationMode {
NO_ARGUMENTS_ALLOCATION,
EAGER_ARGUMENTS_ALLOCATION,
LAZY_ARGUMENTS_ALLOCATION
};
// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
public:
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
bool is_eval);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script);
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
bool has_valid_frame() const { return frame_ != NULL; }
// Set the virtual frame to be new_frame, with non-frame register
// reference counts given by non_frame_registers. The non-frame
// register reference counts of the old frame are returned in
// non_frame_registers.
void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
void DeleteFrame();
RegisterAllocator* allocator() const { return allocator_; }
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
virtual ~CodeGenerator() { delete masm_; }
// Accessors
Scope* scope() const { return scope_; }
bool is_eval() { return is_eval_; }
// Generating deferred code.
void ProcessDeferred();
// State
TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); }
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
void IncrementLoopNesting() { loop_nesting_++; }
void DecrementLoopNesting() { loop_nesting_--; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
#define DEF_VISIT(type) \
void Visit##type(type* node);
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
// reach the end of the statement (ie, it does not exit via break,
// continue, return, or throw). This function is used temporarily while
// the code generator is being transformed.
void VisitAndSpill(Statement* statement);
// Visit a list of statements and then spill the virtual frame if control
// flow can reach the end of the list.
void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
void GenCode(FunctionLiteral* fun);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
// target (which can not be done more than once).
void GenerateReturnSequence(Result* return_value);
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode() const;
// Store the arguments object and allocate it if necessary.
Result StoreArgumentsObject(bool initial);
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
Operand ContextOperand(Register context, int index) const {
return Operand(context, Context::SlotOffset(index));
}
Operand SlotOperand(Slot* slot, Register tmp);
Operand ContextSlotOperandCheckExtensions(Slot* slot,
Result tmp,
JumpTarget* slow);
// Expressions
Operand GlobalObject() const {
return ContextOperand(esi, Context::GLOBAL_INDEX);
}
void LoadCondition(Expression* x,
TypeofState typeof_state,
ControlDestination* destination,
bool force_control);
void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void LoadGlobal();
void LoadGlobalReceiver();
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
void LoadAndSpill(Expression* expression,
TypeofState typeof_state = NOT_INSIDE_TYPEOF);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
// Store the value on top of the expression stack into a slot, leaving the
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
// non-existing properties of the global object, so we must make it
// look like an explicit property access, instead of an access
// through the context chain.
void LoadTypeofExpression(Expression* x);
// Translate the value on top of the frame into control flow to the
// control destination.
void ToBoolean(ControlDestination* destination);
void GenericBinaryOperation(
Token::Value op,
SmiAnalysis* type,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
// Returns true if it succeeds. Otherwise it has no effect.
bool FoldConstantSmis(Token::Value op, int left, int right);
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
void ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
void LikelySmiBinaryOperation(Token::Value op,
Result* left,
Result* right,
OverwriteMode overwrite_mode);
void Comparison(Condition cc,
bool strict,
ControlDestination* destination);
// To prevent long attacker-controlled byte sequences, integer constants
// from the JavaScript source are loaded in two parts if they are larger
// than 16 bits.
static const int kMaxSmiInlinedBits = 16;
bool IsUnsafeSmi(Handle<Object> value);
// Load an integer constant x into a register target using
// at most 16 bits of user-controlled data per assembly operation.
void LoadUnsafeSmi(Register target, Handle<Object> value);
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
// Use an optimized version of Function.prototype.apply that avoids
// allocating the arguments object and just copies the arguments
// from the stack.
void CallApplyLazy(Property* apply,
Expression* receiver,
VariableProxy* arguments,
int position);
void CheckStack();
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
static bool PatchInlineRuntimeEntry(Handle<String> name,
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
// Instantiate the function boilerplate.
void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
// Support for type checks.
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
void GenerateLog(ZoneList<Expression*>* args);
void GenerateGetFramePointer(ZoneList<Expression*>* args);
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
// Fast support for Math.sin and Math.cos.
enum MathOp { SIN, COS };
void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(AstNode* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
// True if the registers are valid for entry to a block. There should
// be no frame-external references to (non-reserved) registers.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
ZoneList<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
// Code generation state
Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
CodeGenState* state_;
int loop_nesting_;
// Jump targets.
// The target of the return from the function.
BreakTarget function_return_;
// True if the function return is shadowed (ie, jumping to the target
// function_return_ does not jump to the true function return, but rather
// to some unlinking code).
bool function_return_is_shadowed_;
// True when we are in code that expects the virtual frame to be fully
// spilled. Some virtual frame functions are disabled in DEBUG builds when
// called from spilled code, because they do not leave the virtual frame
// in a spilled state.
bool in_spilled_code_;
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
friend class Result;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
// Flag that indicates whether or not the code that handles smi arguments
// should be placed in the stub, inlined, or omitted entirely.
enum GenericBinaryFlags {
SMI_CODE_IN_STUB,
SMI_CODE_INLINED
};
class GenericBinaryOpStub: public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags)
: op_(op), mode_(mode), flags_(flags) {
use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
private:
Token::Value op_;
OverwriteMode mode_;
GenericBinaryFlags flags_;
bool use_sse3_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
Token::String(op_),
static_cast<int>(mode_),
static_cast<int>(flags_));
}
#endif
// Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 12> {};
class SSE3Bits: public BitField<bool, 14, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| FlagBits::encode(flags_)
| SSE3Bits::encode(use_sse3_);
}
void Generate(MacroAssembler* masm);
};
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
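A side note on the "Minor key encoding in 16 bits FSOOOOOOOOOOOOMM" comment in the header above: the BitField declarations put the overwrite mode in bits 0-1, the token op in bits 2-13, the SSE3 flag in bit 14 and the generic-binary flag in bit 15. A sketch of the same packing written with plain shifts (plain ints stand in for the enum values):

// Field positions and widths taken from the ModeBits/OpBits/SSE3Bits/FlagBits
// declarations above; this is an illustration, not V8 code.
static int EncodeMinorKey(int op, int mode, int flags, bool use_sse3) {
  return  (mode & 0x3)               // ModeBits:  bits 0-1
       | ((op & 0xFFF) << 2)         // OpBits:    bits 2-13
       | ((use_sse3 ? 1 : 0) << 14)  // SSE3Bits:  bit 14
       | ((flags & 0x1) << 15);      // FlagBits:  bit 15
}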
@@ -620,146 +620,6 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
void MacroAssembler::LoadAllocationTopHelper(
Register result,
Register result_end,
Register scratch,
bool result_contains_top_on_entry) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
// Just return if allocation top is already known.
if (result_contains_top_on_entry) {
// No use of scratch if allocation top is provided.
ASSERT(scratch.is(no_reg));
return;
}
// Move address of new object to result. Use scratch register if available.
if (scratch.is(no_reg)) {
mov(result, Operand::StaticVariable(new_space_allocation_top));
} else {
ASSERT(!scratch.is(result_end));
mov(Operand(scratch), Immediate(new_space_allocation_top));
mov(result, Operand(scratch, 0));
}
}
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
// Update new top. Use scratch if available.
if (scratch.is(no_reg)) {
mov(Operand::StaticVariable(new_space_allocation_top), result_end);
} else {
mov(Operand(scratch, 0), result_end);
}
}
void MacroAssembler::AllocateObjectInNewSpace(
int object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
LoadAllocationTopHelper(result,
result_end,
scratch,
result_contains_top_on_entry);
// Calculate new top and bail out if new space is exhausted.
lea(result_end, Operand(result, object_size));
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
void MacroAssembler::AllocateObjectInNewSpace(
int header_size,
ScaleFactor element_size,
Register element_count,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result,
result_end,
scratch,
result_contains_top_on_entry);
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
lea(result_end, Operand(result, element_count, element_size, header_size));
cmp(result, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
void MacroAssembler::AllocateObjectInNewSpace(
Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry) {
ASSERT(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result,
result_end,
scratch,
result_contains_top_on_entry);
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
if (!object_size.is(result_end)) {
mov(result_end, object_size);
}
add(result_end, Operand(result));
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
// Make sure the object has no tag before resetting top.
and_(Operand(object), Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
cmp(object, Operand::StaticVariable(new_space_allocation_top));
Check(below, "Undo allocation of non allocated memory");
#endif
mov(Operand::StaticVariable(new_space_allocation_top), object);
}
void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
......
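The removed UndoAllocationInNewSpace above is the inverse of the bump allocation: strip the heap tag from the object pointer and wind the allocation top back down to it, discarding that object and anything allocated after it. A hedged sketch (the tag-mask value is assumed, not taken from this diff):

#include <stdint.h>

const intptr_t kHeapObjectTagMaskGuess = 3;  // assumed value of kHeapObjectTagMask

// top_cell stands in for the new-space allocation top cell.
static void UndoBumpAllocation(char** top_cell, char* tagged_object) {
  intptr_t untagged = (intptr_t)tagged_object & ~kHeapObjectTagMaskGuess;
  *top_cell = (char*)untagged;  // objects at or above this address are dropped
}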
@@ -183,48 +183,6 @@ class MacroAssembler: public Assembler {
Label* miss);
// ---------------------------------------------------------------------------
// Allocation support
// Allocate an object in new space. If the new space is exhausted control
// continues at the gc_required label. The allocated object is returned in
// result and end of the new object is returned in result_end. The register
// scratch can be passed as no_reg in which case an additional object
// reference will be added to the reloc info. The returned pointers in result
// and result_end have not yet been tagged as heap objects. If
// result_contains_top_on_entry is true the content of result is known to be
// the allocation top on entry (could be result_end from a previous call to
// AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
void AllocateObjectInNewSpace(int object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry);
void AllocateObjectInNewSpace(int header_size,
ScaleFactor element_size,
Register element_count,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry);
void AllocateObjectInNewSpace(Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
bool result_contains_top_on_entry);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
// object(s) no longer allocated as they would be invalid when allocation is
// un-done.
void UndoAllocationInNewSpace(Register object);
// ---------------------------------------------------------------------------
// Support functions.
@@ -345,13 +303,6 @@ class MacroAssembler: public Assembler {
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
Register result_end,
Register scratch,
bool result_contains_top_on_entry);
void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
......
@@ -1786,7 +1786,17 @@ Object* ConstructStubCompiler::CompileConstructStub(
// Make sure that the maximum heap object size will never cause us
// problems here.
ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize);
__ AllocateObjectInNewSpace(ecx, edx, ecx, no_reg, &generic_stub_call, false);
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
__ mov(edx, Operand::StaticVariable(new_space_allocation_top));
__ add(ecx, Operand(edx)); // Calculate new top.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
__ j(above_equal, &generic_stub_call);
// Update new space top.
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// ebx: initial map
@@ -1850,9 +1860,9 @@ Object* ConstructStubCompiler::CompileConstructStub(
__ mov(ebx, eax);
__ pop(eax);
// Remove caller arguments and receiver from the stack and return.
// Remove caller arguments from the stack and return.
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
__ lea(esp, Operand(esp, ebx, times_4, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
__ IncrementCounter(&Counters::constructed_objects, 1);
__ IncrementCounter(&Counters::constructed_objects_stub, 1);
......
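Finally, the stack-cleanup change in the last hunk (times_pointer_size back to times_4) does not change behaviour on ia32, where a pointer is 4 bytes; the lea still steps esp past argc arguments plus the receiver slot while the return address is held in ecx. In effect:

// esp_after = esp_before + argc * 4 + 4   (the "1 ~ receiver" slot in the comment above)
static inline char* PopArgumentsAndReceiver(char* esp, int argc) {
  const int kPointerSizeGuess = 4;  // assumed ia32 pointer size
  return esp + argc * kPointerSizeGuess + 1 * kPointerSizeGuess;
}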