Made store buffer compaction more predictable.

Review URL: https://codereview.chromium.org/11593026

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13233 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 171a4de7
......@@ -687,10 +687,15 @@ void StoreBuffer::Compact() {
uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
// Shift out the last bits including any tags.
int_addr >>= kPointerSizeLog2;
int hash1 =
((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
// The upper part of an address is basically random because of ASLR and OS
// non-determinism, so we use only the bits within a page for hashing to
// make v8's behavior (more) deterministic.
uintptr_t hash_addr =
int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
(kHashSetLength - 1));
if (hash_set_1_[hash1] == int_addr) continue;
uintptr_t hash2 = (int_addr - (int_addr >> kHashSetLengthLog2));
uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
hash2 &= (kHashSetLength - 1);
if (hash_set_2_[hash2] == int_addr) continue;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment