Prevent modification of cached normalized maps.

Finally solves the problem that r5342 attempted to solve.
When adding a stub to a map's code cache we need to make
sure that this map is not used by objects that do not need
this stub.

The existing solution had two flaws:
1. It checked that the map is cached by asking the current context.
If the object escaped into another context then NormalizedMapCache::Contains
returns a false negative.

2. If a map gets evicted from the cache we should not try to modify it
even though Contains returns false.

This patch implements a much less fragile solution to the same problem:
A map now has a flag (is_shared) that is set once the map is added
to a cache, stays set even after the cache eviction, and is cleared
if the object goes back to fast mode.

Added a regression test.

Review URL: http://codereview.chromium.org/3472006

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5518 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 830185b1
...@@ -649,8 +649,9 @@ void Map::MapVerify() { ...@@ -649,8 +649,9 @@ void Map::MapVerify() {
} }
void Map::NormalizedMapVerify() { void Map::SharedMapVerify() {
MapVerify(); MapVerify();
ASSERT(is_shared());
ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors()); ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors());
ASSERT_EQ(Heap::empty_fixed_array(), code_cache()); ASSERT_EQ(Heap::empty_fixed_array(), code_cache());
ASSERT_EQ(0, pre_allocated_property_fields()); ASSERT_EQ(0, pre_allocated_property_fields());
...@@ -1381,7 +1382,7 @@ void NormalizedMapCache::NormalizedMapCacheVerify() { ...@@ -1381,7 +1382,7 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
for (int i = 0; i < length(); i++) { for (int i = 0; i < length(); i++) {
Object* e = get(i); Object* e = get(i);
if (e->IsMap()) { if (e->IsMap()) {
Map::cast(e)->NormalizedMapVerify(); Map::cast(e)->SharedMapVerify();
} else { } else {
ASSERT(e->IsUndefined()); ASSERT(e->IsUndefined());
} }
......
...@@ -2292,6 +2292,19 @@ bool Map::attached_to_shared_function_info() { ...@@ -2292,6 +2292,19 @@ bool Map::attached_to_shared_function_info() {
} }
// Marks this map as shared (or not). A shared map lives in the
// NormalizedMapCache and must never be modified in place; callers are
// expected to copy it first (see JSObject::UpdateMapCodeCache).
void Map::set_is_shared(bool value) {
  const int mask = 1 << kIsShared;
  if (value) {
    set_bit_field2(bit_field2() | mask);
  } else {
    set_bit_field2(bit_field2() & ~mask);
  }
}
// Returns whether the kIsShared bit of bit_field2 is set, i.e. whether this
// map was handed out by the NormalizedMapCache.
bool Map::is_shared() {
  return (bit_field2() & (1 << kIsShared)) != 0;
}
JSFunction* Map::unchecked_constructor() { JSFunction* Map::unchecked_constructor() {
return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset)); return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
} }
......
...@@ -2099,61 +2099,34 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) { ...@@ -2099,61 +2099,34 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
} }
bool NormalizedMapCache::IsCacheable(JSObject* object) {
  // There are too few global objects for caching their normalized maps
  // to be worthwhile.
  if (object->IsGlobalObject()) return false;
  return true;
}
Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) { Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
Object* result;
Map* fast = obj->map(); Map* fast = obj->map();
if (!IsCacheable(obj)) { int index = Hash(fast) % kEntries;
result = fast->CopyNormalized(mode); Object* result = get(index);
if (result->IsFailure()) return result; if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
} else {
int index = Hash(fast) % kEntries;
result = get(index);
if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
#ifdef DEBUG #ifdef DEBUG
if (FLAG_enable_slow_asserts) { if (FLAG_enable_slow_asserts) {
// Make sure that the new slow map has exactly the same hash as the // The cached map should match newly created normalized map bit-by-bit.
// original fast map. This way we can use hash to check if a slow map Object* fresh = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
// is already in the hash (see Contains method). if (!fresh->IsFailure()) {
ASSERT(Hash(fast) == Hash(Map::cast(result))); ASSERT(memcmp(Map::cast(fresh)->address(),
// The cached map should match newly created normalized map bit-by-bit. Map::cast(result)->address(),
Object* fresh = fast->CopyNormalized(mode); Map::kSize) == 0);
if (!fresh->IsFailure()) {
ASSERT(memcmp(Map::cast(fresh)->address(),
Map::cast(result)->address(),
Map::kSize) == 0);
}
} }
#endif
return result;
} }
#endif
result = fast->CopyNormalized(mode); return result;
if (result->IsFailure()) return result;
set(index, result);
} }
result = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
if (result->IsFailure()) return result;
set(index, result);
Counters::normalized_maps.Increment(); Counters::normalized_maps.Increment();
return result; return result;
} }
bool NormalizedMapCache::Contains(Map* map) {
  // A slow map is assumed to have the same hash as the fast map it was
  // generated from, so if the map is cached at all it can only occupy the
  // single slot determined by its own hash.
  return get(Hash(map) % kEntries) == map;
}
void NormalizedMapCache::Clear() { void NormalizedMapCache::Clear() {
int entries = length(); int entries = length();
for (int i = 0; i != entries; i++) { for (int i = 0; i != entries; i++) {
...@@ -2184,7 +2157,7 @@ bool NormalizedMapCache::CheckHit(Map* slow, ...@@ -2184,7 +2157,7 @@ bool NormalizedMapCache::CheckHit(Map* slow,
Map* fast, Map* fast,
PropertyNormalizationMode mode) { PropertyNormalizationMode mode) {
#ifdef DEBUG #ifdef DEBUG
slow->NormalizedMapVerify(); slow->SharedMapVerify();
#endif #endif
return return
slow->constructor() == fast->constructor() && slow->constructor() == fast->constructor() &&
...@@ -2194,17 +2167,17 @@ bool NormalizedMapCache::CheckHit(Map* slow, ...@@ -2194,17 +2167,17 @@ bool NormalizedMapCache::CheckHit(Map* slow,
fast->inobject_properties()) && fast->inobject_properties()) &&
slow->instance_type() == fast->instance_type() && slow->instance_type() == fast->instance_type() &&
slow->bit_field() == fast->bit_field() && slow->bit_field() == fast->bit_field() &&
slow->bit_field2() == fast->bit_field2(); (slow->bit_field2() & ~(1<<Map::kIsShared)) == fast->bit_field2();
} }
Object* JSObject::UpdateMapCodeCache(String* name, Code* code) { Object* JSObject::UpdateMapCodeCache(String* name, Code* code) {
if (!HasFastProperties() && if (map()->is_shared()) {
NormalizedMapCache::IsCacheable(this) && // Fast case maps are never marked as shared.
Top::context()->global_context()->normalized_map_cache()-> ASSERT(!HasFastProperties());
Contains(map())) { // Replace the map with an identical copy that can be safely modified.
// Replace the map with the identical copy that can be safely modified. Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES); UNIQUE_NORMALIZED_MAP);
if (obj->IsFailure()) return obj; if (obj->IsFailure()) return obj;
Counters::normalized_maps.Increment(); Counters::normalized_maps.Increment();
...@@ -3189,12 +3162,14 @@ Object* Map::CopyDropDescriptors() { ...@@ -3189,12 +3162,14 @@ Object* Map::CopyDropDescriptors() {
} }
Map::cast(result)->set_bit_field(bit_field()); Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2()); Map::cast(result)->set_bit_field2(bit_field2());
Map::cast(result)->set_is_shared(false);
Map::cast(result)->ClearCodeCache(); Map::cast(result)->ClearCodeCache();
return result; return result;
} }
Object* Map::CopyNormalized(PropertyNormalizationMode mode) { Object* Map::CopyNormalized(PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing) {
int new_instance_size = instance_size(); int new_instance_size = instance_size();
if (mode == CLEAR_INOBJECT_PROPERTIES) { if (mode == CLEAR_INOBJECT_PROPERTIES) {
new_instance_size -= inobject_properties() * kPointerSize; new_instance_size -= inobject_properties() * kPointerSize;
...@@ -3213,8 +3188,12 @@ Object* Map::CopyNormalized(PropertyNormalizationMode mode) { ...@@ -3213,8 +3188,12 @@ Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
Map::cast(result)->set_bit_field(bit_field()); Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2()); Map::cast(result)->set_bit_field2(bit_field2());
Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
#ifdef DEBUG #ifdef DEBUG
Map::cast(result)->NormalizedMapVerify(); if (Map::cast(result)->is_shared()) {
Map::cast(result)->SharedMapVerify();
}
#endif #endif
return result; return result;
......
...@@ -200,6 +200,14 @@ enum PropertyNormalizationMode { ...@@ -200,6 +200,14 @@ enum PropertyNormalizationMode {
}; };
// NormalizedMapSharingMode is used to specify whether a map may be shared
// by different objects with normalized properties.
enum NormalizedMapSharingMode {
// The map belongs to a single object; it is safe to modify in place.
UNIQUE_NORMALIZED_MAP,
// The map may be shared via the NormalizedMapCache and must not be
// modified; a copy must be made first (see Map::is_shared).
SHARED_NORMALIZED_MAP
};
// Instance size sentinel for objects of variable size. // Instance size sentinel for objects of variable size.
static const int kVariableSizeSentinel = 0; static const int kVariableSizeSentinel = 0;
...@@ -2509,12 +2517,8 @@ class NormalizedMapCache: public FixedArray { ...@@ -2509,12 +2517,8 @@ class NormalizedMapCache: public FixedArray {
public: public:
static const int kEntries = 64; static const int kEntries = 64;
static bool IsCacheable(JSObject* object);
Object* Get(JSObject* object, PropertyNormalizationMode mode); Object* Get(JSObject* object, PropertyNormalizationMode mode);
bool Contains(Map* map);
void Clear(); void Clear();
// Casting // Casting
...@@ -3176,6 +3180,13 @@ class Map: public HeapObject { ...@@ -3176,6 +3180,13 @@ class Map: public HeapObject {
inline bool attached_to_shared_function_info(); inline bool attached_to_shared_function_info();
// Tells whether the map is shared between objects that may have different
// behavior. If true, the map should never be modified, instead a clone
// should be created and modified.
inline void set_is_shared(bool value);
inline bool is_shared();
// Tells whether the instance needs security checks when accessing its // Tells whether the instance needs security checks when accessing its
// properties. // properties.
inline void set_is_access_check_needed(bool access_check_needed); inline void set_is_access_check_needed(bool access_check_needed);
...@@ -3197,7 +3208,8 @@ class Map: public HeapObject { ...@@ -3197,7 +3208,8 @@ class Map: public HeapObject {
MUST_USE_RESULT Object* CopyDropDescriptors(); MUST_USE_RESULT Object* CopyDropDescriptors();
MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode); MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing);
// Returns a copy of the map, with all transitions dropped from the // Returns a copy of the map, with all transitions dropped from the
// instance descriptors. // instance descriptors.
...@@ -3261,7 +3273,7 @@ class Map: public HeapObject { ...@@ -3261,7 +3273,7 @@ class Map: public HeapObject {
#ifdef DEBUG #ifdef DEBUG
void MapPrint(); void MapPrint();
void MapVerify(); void MapVerify();
void NormalizedMapVerify(); void SharedMapVerify();
#endif #endif
inline int visitor_id(); inline int visitor_id();
...@@ -3325,6 +3337,7 @@ class Map: public HeapObject { ...@@ -3325,6 +3337,7 @@ class Map: public HeapObject {
static const int kHasFastElements = 2; static const int kHasFastElements = 2;
static const int kStringWrapperSafeForDefaultValueOf = 3; static const int kStringWrapperSafeForDefaultValueOf = 3;
static const int kAttachedToSharedFunctionInfo = 4; static const int kAttachedToSharedFunctionInfo = 4;
static const int kIsShared = 5;
// Layout of the default cache. It holds alternating name and code objects. // Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2; static const int kCodeCacheEntrySize = 2;
......
...@@ -2937,6 +2937,49 @@ THREADED_TEST(NamedInterceptorDictionaryIC) { ...@@ -2937,6 +2937,49 @@ THREADED_TEST(NamedInterceptorDictionaryIC) {
} }
// Regression test: an object with a named interceptor, normalized in one
// context and then used from another, must not have its (shared) normalized
// map polluted with context-specific IC stubs.
THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
  v8::HandleScope scope;
  v8::Persistent<Context> context1 = Context::New();
  context1->Enter();
  Local<ObjectTemplate> interceptor_templ = ObjectTemplate::New();
  interceptor_templ->SetNamedPropertyHandler(XPropertyGetter);
  // Instantiate an object carrying the named interceptor.
  v8::Local<v8::Object> obj = interceptor_templ->NewInstance();
  context1->Global()->Set(v8_str("interceptor_obj"), obj);
  // Add and delete a property so the object drops into dictionary mode.
  CompileRun("interceptor_obj.y = 0;"
             "delete interceptor_obj.y;");
  context1->Exit();
  {
    // Hand the same object to a second context and hammer the load IC.
    LocalContext context2;
    context2->Global()->Set(v8_str("interceptor_obj"), obj);
    Local<Value> value =
        CompileRun("function get_x(o) { return o.x; }"
                   "interceptor_obj.x = 42;"
                   "for (var i=0; i != 10; i++) {"
                   "  get_x(interceptor_obj);"
                   "}"
                   "get_x(interceptor_obj)");
    // The interceptor, not the cached stub, must answer the load.
    CHECK_EQ(value, v8_str("x"));
  }
  // Back in the original context, normalize another object so the
  // NormalizedMapCache verification code runs.
  context1->Enter();
  CompileRun("var obj = { x : 0 }; delete obj.x;");
  context1->Exit();
  context1.Dispose();
}
static v8::Handle<Value> SetXOnPrototypeGetter(Local<String> property, static v8::Handle<Value> SetXOnPrototypeGetter(Local<String> property,
const AccessorInfo& info) { const AccessorInfo& info) {
// Set x on the prototype object and do not handle the get request. // Set x on the prototype object and do not handle the get request.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment