Implement clearing of CompareICs.

This allows CompareICs to be cleared during garbage collection to avoid
cross-context garbage retention through maps stored in CompareIC stubs
for the KNOWN_OBJECTS state.

R=vegorov@chromium.org
BUG=v8:2102

Review URL: https://chromiumcodereview.appspot.com/10263008

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11491 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
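Overview of the clearing path, condensed from the IC::Clear hunks below (a
sketch for orientation, not additional code): during GC, IC::Clear now
dispatches COMPARE_IC targets to a new CompareIC::Clear, which resets the
call site to the uninitialized stub and disables the inlined smi check.

  // Condensed from this patch; see the full hunks below.
  void CompareIC::Clear(Address address, Code* target) {
    if (target->major_key() != CodeStub::CompareIC) return;  // generic stub
    if (target->compare_state() != KNOWN_OBJECTS) return;    // holds no maps
    Token::Value op = CompareIC::ComputeOperation(target);
    SetTargetAtAddress(address, GetRawUninitialized(op));
    PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
  }

Only KNOWN_OBJECTS is cleared because that is the state whose generated code
embeds the operands' maps as guards; those embedded maps are what can keep
another context's objects alive.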
@@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Address cmp_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1729,34 +1729,31 @@ void PatchInlinedSmiCode(Address address) {
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  ASSERT(Assembler::IsCmpRegister(instr_at_patch));
-  ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
-            Assembler::GetRm(instr_at_patch).code());
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling by changing from
+  //   cmp rx, rx
+  //   b eq/ne, <target>
+  // to
+  //   tst rx, #kSmiTagMask
+  //   b ne/eq, <target>
+  // and vice-versa to be disabled again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Assembler::GetRn(instr_at_patch);
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+    ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+              Assembler::GetRm(instr_at_patch).code());
+    patcher.masm()->tst(reg, Operand(kSmiTagMask));
+  } else {
+    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+    ASSERT(Assembler::IsTstImmediate(instr_at_patch));
+    patcher.masm()->cmp(reg, reg);
+  }
   ASSERT(Assembler::IsBranch(branch_instr));
   if (Assembler::GetCondition(branch_instr) == eq) {
-    // This is patching a "jump if not smi" site to be active.
-    // Changing
-    //   cmp rx, rx
-    //   b eq, <target>
-    // to
-    //   tst rx, #kSmiTagMask
-    //   b ne, <target>
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Assembler::GetRn(instr_at_patch);
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(ne);
   } else {
     ASSERT(Assembler::GetCondition(branch_instr) == ne);
-    // This is patching a "jump if smi" site to be active.
-    // Changing
-    //   cmp rx, rx
-    //   b ne, <target>
-    // to
-    //   tst rx, #kSmiTagMask
-    //   b eq, <target>
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Assembler::GetRn(instr_at_patch);
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(eq);
   }
 }
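Note the shape of the ARM rewrite above: the old version could only activate
the smi check and duplicated the CodePatcher logic in both branches; the new
version derives cmp-versus-tst from the check argument and keeps only the
branch-condition flip per case, so the two-instruction window can be patched
in either direction, which DISABLE_INLINED_SMI_CHECK (used by
CompareIC::Clear) requires.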
......
@@ -3738,7 +3738,7 @@ CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
+      masm_(NULL, address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
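The Isolate::Current() to NULL change here (and in the ia32/x64 CodePatcher
constructors below) is presumably safe because the patcher only emits raw
instructions into an existing buffer and never needs isolate state; an
assumption worth flagging is that this also keeps code patching usable from
the GC-time clearing path added by this patch.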
......
@@ -498,6 +498,7 @@ class ICCompareStub: public CodeStub {
   virtual void FinishCode(Handle<Code> code) {
     code->set_compare_state(state_);
+    code->set_compare_operation(op_);
   }
 
   virtual CodeStub::Major MajorKey() { return CompareIC; }
......
@@ -640,6 +640,9 @@ class Assembler : public AssemblerBase {
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
+  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+  static const byte kJzShortOpcode = kJccShortPrefix | zero;
 
   // ---------------------------------------------------------------------------
   // Code generation
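The two new opcode constants follow from the x86 encoding of short
conditional jumps, 0x70 | condition. A self-contained sketch (the Condition
values below mirror the hardware tttn encoding used by V8's assemblers;
everything else is illustrative):

  #include <cstdio>

  // Hardware condition-code values for the carry/zero tests (x86 tttn bits).
  enum Condition { carry = 2, not_carry = 3, zero = 4, not_zero = 5 };

  int main() {
    const unsigned kJccShortPrefix = 0x70;
    // Prints: jc=0x72 jnc=0x73 jz=0x74 jnz=0x75
    std::printf("jc=%#x jnc=%#x jz=%#x jnz=%#x\n",
                kJccShortPrefix | carry, kJccShortPrefix | not_carry,
                kJccShortPrefix | zero, kJccShortPrefix | not_zero);
    return 0;
  }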
......
@@ -1727,12 +1727,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1753,14 +1753,18 @@ void PatchInlinedSmiCode(Address address) {
            address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. There must be a
-  // short jump-if-carry/not-carry at this position.
+  // Patch with a short conditional jump. Enabling means switching from a short
+  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+  // reverse operation of that.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
-         *jmp_address == Assembler::kJcShortOpcode);
-  Condition cc = *jmp_address == Assembler::kJncShortOpcode
-      ? not_zero
-      : zero;
+  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+         ? (*jmp_address == Assembler::kJncShortOpcode ||
+            *jmp_address == Assembler::kJcShortOpcode)
+         : (*jmp_address == Assembler::kJnzShortOpcode ||
+            *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
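Concretely, enabling rewrites the short-jump opcode 0x73 (jnc) to 0x75 (jnz)
or 0x72 (jc) to 0x74 (jz), and disabling maps them back. Since both pairs
differ only in bit pattern 0x06, the whole rewrite is equivalent to a
one-byte XOR; a hypothetical helper (not part of the patch) capturing the
same mapping for the four legal opcodes:

  // Hypothetical equivalent of the opcode rewrite above: toggles
  // 0x72 <-> 0x74 (jc/jz) and 0x73 <-> 0x75 (jnc/jnz).
  inline unsigned char ToggleInlinedSmiCheckJcc(unsigned char jcc) {
    return static_cast<unsigned char>(jcc ^ 0x06);
  }

The x64 port later in this diff applies the identical change.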
......
@@ -2566,7 +2566,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(Isolate::Current(), address, size + Assembler::kGap) {
+      masm_(NULL, address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
......
@@ -352,9 +352,9 @@ void IC::Clear(Address address) {
       return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
+    case Code::COMPARE_IC: return CompareIC::Clear(address, target);
     case Code::UNARY_OP_IC:
     case Code::BINARY_OP_IC:
-    case Code::COMPARE_IC:
     case Code::TO_BOOLEAN_IC:
       // Clearing these is tricky and does not
       // make any performance difference.
@@ -365,9 +365,8 @@ void IC::Clear(Address address) {
 
 void CallICBase::Clear(Address address, Code* target) {
+  if (target->ic_state() == UNINITIALIZED) return;
   bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
-  State state = target->ic_state();
-  if (state == UNINITIALIZED) return;
   Code* code =
       Isolate::Current()->stub_cache()->FindCallInitialize(
           target->arguments_count(),
@@ -410,6 +409,17 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
 }
 
+void CompareIC::Clear(Address address, Code* target) {
+  // Only clear ICCompareStubs; we currently cannot clear generic CompareStubs.
+  if (target->major_key() != CodeStub::CompareIC) return;
+  // Only clear CompareICs that can retain objects.
+  if (target->compare_state() != KNOWN_OBJECTS) return;
+  Token::Value op = CompareIC::ComputeOperation(target);
+  SetTargetAtAddress(address, GetRawUninitialized(op));
+  PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
+}
+
+
 static bool HasInterceptorGetter(JSObject* object) {
   return !object->GetNamedInterceptor()->getter()->IsUndefined();
 }
@@ -2396,7 +2406,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
   // Activate inlined smi code.
   if (previous_type == BinaryOpIC::UNINITIALIZED) {
-    PatchInlinedSmiCode(ic.address());
+    PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
@@ -2457,6 +2467,14 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
 }
 
+Code* CompareIC::GetRawUninitialized(Token::Value op) {
+  ICCompareStub stub(op, UNINITIALIZED);
+  Code* code = NULL;
+  CHECK(stub.FindCodeInCache(&code));
+  return code;
+}
+
+
 Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
   ICCompareStub stub(op, UNINITIALIZED);
   return stub.GetCode();
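A design note: GetRawUninitialized returns a raw Code* and CHECKs
FindCodeInCache instead of calling stub.GetCode() the way GetUninitialized
does. Presumably this is because clearing runs during garbage collection
(per the commit description), where allocating a new stub is not an option,
so the uninitialized stub must already be in the code cache; that holds
because a CompareIC only reaches KNOWN_OBJECTS after starting out with the
UNINITIALIZED stub installed.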
@@ -2471,6 +2489,12 @@ CompareIC::State CompareIC::ComputeState(Code* target) {
 }
 
+Token::Value CompareIC::ComputeOperation(Code* target) {
+  ASSERT(target->major_key() == CodeStub::CompareIC);
+  return static_cast<Token::Value>(target->compare_operation());
+}
+
+
 const char* CompareIC::GetStateName(State state) {
   switch (state) {
     case UNINITIALIZED: return "UNINITIALIZED";
......
@@ -794,6 +794,9 @@ class CompareIC: public IC {
   // Helper function for determining the state of a compare IC.
   static State ComputeState(Code* target);
 
+  // Helper function for determining the operation a compare IC is for.
+  static Token::Value ComputeOperation(Code* target);
+
   static const char* GetStateName(State state);
 
  private:
@@ -804,7 +807,13 @@ class CompareIC: public IC {
   Condition GetCondition() const { return ComputeCondition(op_); }
   State GetState() { return ComputeState(target()); }
 
+  static Code* GetRawUninitialized(Token::Value op);
+
+  static void Clear(Address address, Code* target);
+
   Token::Value op_;
+
+  friend class IC;
 };
@@ -817,7 +826,8 @@ class ToBooleanIC: public IC {
 
 // Helper for BinaryOpIC and CompareIC.
-void PatchInlinedSmiCode(Address address);
+enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
 
 } }  // namespace v8::internal
......
@@ -3193,6 +3193,18 @@ void Code::set_compare_state(byte value) {
 }
 
+byte Code::compare_operation() {
+  ASSERT(is_compare_ic_stub());
+  return READ_BYTE_FIELD(this, kCompareOperationOffset);
+}
+
+
+void Code::set_compare_operation(byte value) {
+  ASSERT(is_compare_ic_stub());
+  WRITE_BYTE_FIELD(this, kCompareOperationOffset, value);
+}
+
+
 byte Code::to_boolean_state() {
   ASSERT(is_to_boolean_ic_stub());
   return READ_BYTE_FIELD(this, kToBooleanTypeOffset);
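These accessors store the Token::Value narrowed to a single byte (safe,
since V8 has far fewer than 256 token kinds) and ComputeOperation widens it
back. A standalone sketch of that round trip, with stand-ins for the real
types and byte field:

  #include <cassert>

  enum Value { EQ = 17, LT = 18, GT = 19 };  // stand-in for Token::Value

  int main() {
    unsigned char field = static_cast<unsigned char>(GT);  // set_compare_operation
    Value op = static_cast<Value>(field);                  // compare_operation
    assert(op == GT);
    return 0;
  }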
......
@@ -8378,6 +8378,10 @@ void Code::Disassemble(const char* name, FILE* out) {
       CompareIC::State state = CompareIC::ComputeState(this);
       PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
     }
+    if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
+      Token::Value op = CompareIC::ComputeOperation(this);
+      PrintF(out, "compare_operation = %s\n", Token::Name(op));
+    }
   }
   if ((name != NULL) && (name[0] != '\0')) {
     PrintF(out, "name = %s\n", name);
......
@@ -4291,6 +4291,11 @@ class Code: public HeapObject {
   inline byte compare_state();
   inline void set_compare_state(byte value);
 
+  // [compare_operation]: For kind COMPARE_IC tells what compare operation the
+  // stub was generated for.
+  inline byte compare_operation();
+  inline void set_compare_operation(byte value);
+
   // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
   inline byte to_boolean_state();
   inline void set_to_boolean_state(byte value);
@@ -4474,6 +4479,8 @@ class Code: public HeapObject {
   static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
 
+  static const int kCompareOperationOffset = kCompareStateOffset + 1;
+
   static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
   static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
......
@@ -629,7 +629,8 @@ class Assembler : public AssemblerBase {
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
+  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+  static const byte kJzShortOpcode = kJccShortPrefix | zero;
 
   // ---------------------------------------------------------------------------
......
@@ -1741,11 +1741,11 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1766,14 +1766,18 @@ void PatchInlinedSmiCode(Address address) {
            address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. There must be a
-  // short jump-if-carry/not-carry at this position.
+  // Patch with a short conditional jump. Enabling means switching from a short
+  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+  // reverse operation of that.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
-         *jmp_address == Assembler::kJcShortOpcode);
-  Condition cc = *jmp_address == Assembler::kJncShortOpcode
-      ? not_zero
-      : zero;
+  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+         ? (*jmp_address == Assembler::kJncShortOpcode ||
+            *jmp_address == Assembler::kJcShortOpcode)
+         : (*jmp_address == Assembler::kJnzShortOpcode ||
+            *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
......
@@ -4188,7 +4188,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(Isolate::Current(), address, size + Assembler::kGap) {
+      masm_(NULL, address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
......