Simple type tracking in the fast code generator.

Initial implementation of ad hoc must-be-smi tracking in the fast code
generator.  Type information is used to avoid the write barrier for
smi property stores and to avoid the smi check for the inputs/output
of bitwise OR.

Review URL: http://codereview.chromium.org/597021

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3833 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 638cb4f9
......@@ -62,6 +62,9 @@ void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
__ cmp(destination(), ip);
__ Check(ne, "DontDelete cells can't contain the hole");
}
// The loaded value is not known to be a smi.
clear_as_smi(destination());
}
......@@ -75,21 +78,37 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
int index = lookup.GetFieldIndex() - map->inobject_properties();
int offset = index * kPointerSize;
// We will emit the write barrier unless the stored value is statically
// known to be a smi.
bool needs_write_barrier = !is_smi(accumulator0());
// Negative offsets are inobject properties.
if (offset < 0) {
offset += map->instance_size();
__ mov(scratch0(), receiver_reg()); // Copy receiver for write barrier.
__ str(accumulator0(), FieldMemOperand(receiver_reg(), offset));
if (needs_write_barrier) {
// Preserve receiver from write barrier.
__ mov(scratch0(), receiver_reg());
}
} else {
offset += FixedArray::kHeaderSize;
__ ldr(scratch0(),
FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
__ str(accumulator0(), FieldMemOperand(scratch0(), offset));
}
// Perform the store.
__ str(accumulator0(), FieldMemOperand(scratch0(), offset));
__ mov(scratch1(), Operand(offset));
__ RecordWrite(scratch0(), scratch1(), ip);
if (needs_write_barrier) {
__ mov(scratch1(), Operand(offset));
__ RecordWrite(scratch0(), scratch1(), ip);
}
if (destination().is(accumulator1())) {
__ mov(accumulator1(), accumulator0());
if (is_smi(accumulator0())) {
set_as_smi(accumulator1());
} else {
clear_as_smi(accumulator1());
}
}
}
......@@ -115,30 +134,41 @@ void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
__ ldr(destination(), FieldMemOperand(scratch0(), offset));
}
// The loaded value is not known to be a smi.
clear_as_smi(destination());
}
void FastCodeGenerator::EmitBitOr() {
Register check; // A register is used for the smi check/operation.
if (destination().is(no_reg)) {
check = scratch0(); // Do not clobber either operand register.
} else {
// Preserve whichever operand shares the destination register in case we
// have to bail out.
__ mov(scratch0(), destination());
check = destination();
}
__ orr(check, accumulator1(), Operand(accumulator0()));
// Restore the clobbered operand if necessary.
if (destination().is(no_reg)) {
__ BranchOnNotSmi(check, bailout());
if (is_smi(accumulator0()) && is_smi(accumulator1())) {
// If both operands are known to be a smi then there is no need to check
// the operands or result. There is no need to perform the operation in
// an effect context.
if (!destination().is(no_reg)) {
__ orr(destination(), accumulator1(), Operand(accumulator0()));
}
} else if (destination().is(no_reg)) {
// Result is not needed but do not clobber the operands in case of
// bailout.
__ orr(scratch0(), accumulator1(), Operand(accumulator0()));
__ BranchOnNotSmi(scratch0(), bailout());
} else {
// Preserve the destination operand in a scratch register in case of
// bailout.
Label done;
__ BranchOnSmi(check, &done);
__ mov(scratch0(), destination());
__ orr(destination(), accumulator1(), Operand(accumulator0()));
__ BranchOnSmi(destination(), &done);
__ mov(destination(), scratch0());
__ jmp(bailout());
__ bind(&done);
}
// If we didn't bail out, the result (in fact, both inputs too) is known to
// be a smi.
set_as_smi(accumulator0());
set_as_smi(accumulator1());
}
......
......@@ -66,7 +66,7 @@ class FastCodeGenSyntaxChecker: public AstVisitor {
class FastCodeGenerator: public AstVisitor {
public:
explicit FastCodeGenerator(MacroAssembler* masm)
: masm_(masm), info_(NULL), destination_(no_reg) {
: masm_(masm), info_(NULL), destination_(no_reg), smi_bits_(0) {
}
static Handle<Code> MakeCode(CompilationInfo* info);
......@@ -97,6 +97,21 @@ class FastCodeGenerator: public AstVisitor {
return (reg.is(accumulator0())) ? accumulator1() : accumulator0();
}
// Flags are true if the respective register is statically known to hold a
// smi. We do not track every register, only the accumulator registers.
bool is_smi(Register reg) {
ASSERT(!reg.is(no_reg));
return (smi_bits_ & reg.bit()) != 0;
}
void set_as_smi(Register reg) {
ASSERT(!reg.is(no_reg));
smi_bits_ = smi_bits_ | reg.bit();
}
void clear_as_smi(Register reg) {
ASSERT(!reg.is(no_reg));
smi_bits_ = smi_bits_ & ~reg.bit();
}
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
......@@ -129,6 +144,7 @@ class FastCodeGenerator: public AstVisitor {
CompilationInfo* info_;
Label bailout_;
Register destination_;
uint32_t smi_bits_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};
......
......@@ -61,6 +61,9 @@ void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
__ cmp(destination(), Factory::the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
// The loaded value is not known to be a smi.
clear_as_smi(destination());
}
......@@ -74,26 +77,43 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
int index = lookup.GetFieldIndex() - map->inobject_properties();
int offset = index * kPointerSize;
// Negative offsets are inobject properties.
// We will emit the write barrier unless the stored value is statically
// known to be a smi.
bool needs_write_barrier = !is_smi(accumulator0());
// Perform the store. Negative offsets are inobject properties.
if (offset < 0) {
offset += map->instance_size();
__ mov(scratch0(), receiver_reg()); // Copy receiver for write barrier.
__ mov(FieldOperand(receiver_reg(), offset), accumulator0());
if (needs_write_barrier) {
// Preserve receiver from write barrier.
__ mov(scratch0(), receiver_reg());
}
} else {
offset += FixedArray::kHeaderSize;
__ mov(scratch0(),
FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
__ mov(FieldOperand(scratch0(), offset), accumulator0());
}
// Perform the store.
__ mov(FieldOperand(scratch0(), offset), accumulator0());
if (destination().is(no_reg)) {
__ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
} else {
// Copy the value to the other accumulator to preserve a copy from the
// write barrier. One of the accumulators is available as a scratch
// register.
if (needs_write_barrier) {
if (destination().is(no_reg)) {
// After RecordWrite accumulator0 is only accidentally a smi, but it is
// already marked as not known to be one.
__ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
} else {
// Copy the value to the other accumulator to preserve a copy from the
// write barrier. One of the accumulators is available as a scratch
// register. Neither is a smi.
__ mov(accumulator1(), accumulator0());
clear_as_smi(accumulator1());
Register value_scratch = other_accumulator(destination());
__ RecordWrite(scratch0(), offset, value_scratch, scratch1());
}
} else if (destination().is(accumulator1())) {
__ mov(accumulator1(), accumulator0());
Register value_scratch = other_accumulator(destination());
__ RecordWrite(scratch0(), offset, value_scratch, scratch1());
// Is a smi because we do not need the write barrier.
set_as_smi(accumulator1());
}
}
......@@ -119,36 +139,46 @@ void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
__ mov(destination(), FieldOperand(scratch0(), offset));
}
// The loaded value is not known to be a smi.
clear_as_smi(destination());
}
void FastCodeGenerator::EmitBitOr() {
Register copied; // One operand is copied to a scratch register.
Register other; // The other is not modified by the operation.
Register check; // A register is used for the smi check/operation.
if (destination().is(no_reg)) {
copied = accumulator1(); // Arbitrary choice of operand to copy.
other = accumulator0();
check = scratch0(); // Do not clobber either operand register.
} else {
copied = destination();
other = other_accumulator(destination());
check = destination();
}
__ mov(scratch0(), copied);
__ or_(check, Operand(other));
__ test(check, Immediate(kSmiTagMask));
// Restore the clobbered operand if necessary.
if (destination().is(no_reg)) {
if (is_smi(accumulator0()) && is_smi(accumulator1())) {
// If both operands are known to be a smi then there is no need to check
// the operands or result. There is no need to perform the operation in
// an effect context.
if (!destination().is(no_reg)) {
// Leave the result in the destination register. Bitwise or is
// commutative.
__ or_(destination(), Operand(other_accumulator(destination())));
}
} else if (destination().is(no_reg)) {
// Result is not needed but do not clobber the operands in case of
// bailout.
__ mov(scratch0(), accumulator1());
__ or_(scratch0(), Operand(accumulator0()));
__ test(scratch0(), Immediate(kSmiTagMask));
__ j(not_zero, bailout(), not_taken);
} else {
// Preserve the destination operand in a scratch register in case of
// bailout.
Label done;
__ mov(scratch0(), destination());
__ or_(destination(), Operand(other_accumulator(destination())));
__ test(destination(), Immediate(kSmiTagMask));
__ j(zero, &done, taken);
__ mov(copied, scratch0());
__ mov(destination(), scratch0());
__ jmp(bailout());
__ bind(&done);
}
// If we didn't bail out, the result (in fact, both inputs too) is known to
// be a smi.
set_as_smi(accumulator0());
set_as_smi(accumulator1());
}
......
......@@ -53,6 +53,7 @@ void FastCodeGenerator::EmitLoadReceiver() {
void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
ASSERT(!destination().is(no_reg));
ASSERT(cell->IsJSGlobalPropertyCell());
__ Move(destination(), cell);
__ movq(destination(),
FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
......@@ -60,6 +61,9 @@ void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
__ Cmp(destination(), Factory::the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
// The loaded value is not known to be a smi.
clear_as_smi(destination());
}
......@@ -73,26 +77,43 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
int index = lookup.GetFieldIndex() - map->inobject_properties();
int offset = index * kPointerSize;
// Negative offsets are inobject properties.
// We will emit the write barrier unless the stored value is statically
// known to be a smi.
bool needs_write_barrier = !is_smi(accumulator0());
// Perform the store. Negative offsets are inobject properties.
if (offset < 0) {
offset += map->instance_size();
__ movq(scratch0(), receiver_reg()); // Copy receiver for write barrier.
__ movq(FieldOperand(receiver_reg(), offset), accumulator0());
if (needs_write_barrier) {
// Preserve receiver from write barrier.
__ movq(scratch0(), receiver_reg());
}
} else {
offset += FixedArray::kHeaderSize;
__ movq(scratch0(),
FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
__ movq(FieldOperand(scratch0(), offset), accumulator0());
}
// Perform the store.
__ movq(FieldOperand(scratch0(), offset), accumulator0());
if (destination().is(no_reg)) {
__ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
} else {
// Copy the value to the other accumulator to preserve a copy from the
// write barrier. One of the accumulators is available as a scratch
// register.
if (needs_write_barrier) {
if (destination().is(no_reg)) {
// After RecordWrite accumulator0 is only accidentally a smi, but it is
// already marked as not known to be one.
__ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
} else {
// Copy the value to the other accumulator to preserve a copy from the
// write barrier. One of the accumulators is available as a scratch
// register. Neither is a smi.
__ movq(accumulator1(), accumulator0());
clear_as_smi(accumulator1());
Register value_scratch = other_accumulator(destination());
__ RecordWrite(scratch0(), offset, value_scratch, scratch1());
}
} else if (destination().is(accumulator1())) {
__ movq(accumulator1(), accumulator0());
Register value_scratch = other_accumulator(destination());
__ RecordWrite(scratch0(), offset, value_scratch, scratch1());
// Is a smi because we do not need the write barrier.
set_as_smi(accumulator1());
}
}
......@@ -118,34 +139,46 @@ void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
__ movq(destination(), FieldOperand(scratch0(), offset));
}
// The loaded value is not known to be a smi.
clear_as_smi(destination());
}
void FastCodeGenerator::EmitBitOr() {
Register copied; // One operand is copied to a scratch register.
Register other; // The other is not modified by the operation.
Register check; // A register is used for the smi check/operation.
if (destination().is(no_reg)) {
copied = accumulator1(); // Arbitrary choice of operand to copy.
other = accumulator0();
check = scratch0(); // Do not clobber either operand register.
} else {
copied = destination();
other = other_accumulator(destination());
check = destination();
}
__ movq(scratch0(), copied);
__ or_(check, other);
// Restore the clobbered operand if necessary.
if (destination().is(no_reg)) {
__ JumpIfNotSmi(check, bailout());
if (is_smi(accumulator0()) && is_smi(accumulator1())) {
// If both operands are known to be a smi then there is no need to check
// the operands or result.
if (destination().is(no_reg)) {
__ or_(accumulator1(), accumulator0());
} else {
// Leave the result in the destination register. Bitwise or is
// commutative.
__ or_(destination(), other_accumulator(destination()));
}
} else if (destination().is(no_reg)) {
// Result is not needed but do not clobber the operands in case of
// bailout.
__ movq(scratch0(), accumulator1());
__ or_(scratch0(), accumulator0());
__ JumpIfNotSmi(scratch0(), bailout());
} else {
// Preserve the destination operand in a scratch register in case of
// bailout.
Label done;
__ JumpIfSmi(check, &done);
__ movq(copied, scratch0());
__ movq(scratch0(), destination());
__ or_(destination(), other_accumulator(destination()));
__ JumpIfSmi(destination(), &done);
__ movq(destination(), scratch0());
__ jmp(bailout());
__ bind(&done);
}
// If we didn't bail out, the result (in fact, both inputs too) is known to
// be a smi.
set_as_smi(accumulator0());
set_as_smi(accumulator1());
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment