Commit 63ec79a4 authored by danno@chromium.org

Revert r13494: "Use MemoryChunk-based allocation for deoptimization entry code"

The reverted patch appears to cause crashes on Windows.

TBR=jkummerow@chromium.org

Review URL: https://codereview.chromium.org/12049069

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13495 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent de17ce77
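For context, the reverted code path allocates each deoptimization entry table as a plain VirtualMemory reservation and commits it on demand. The following standalone POSIX sketch illustrates that reserve-then-commit idiom; ReservedRegion, kTableSize, and the mmap/mprotect implementation are illustrative stand-ins for v8::internal::VirtualMemory, not V8's actual code.

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// Illustrative stand-in for v8::internal::VirtualMemory: reserve address
// space on construction, commit pages on demand, release on destruction.
class ReservedRegion {
 public:
  explicit ReservedRegion(size_t size) : size_(size) {
    // PROT_NONE reserves the range without accessible backing store.
    base_ = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  }
  ~ReservedRegion() {
    if (base_ != MAP_FAILED) munmap(base_, size_);
  }

  // Make [address, address + size) readable/writable, executable on request.
  bool Commit(void* address, size_t size, bool executable) {
    int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
    return mprotect(address, size, prot) == 0;
  }

  void* address() const { return base_; }
  size_t size() const { return size_; }

 private:
  void* base_;
  size_t size_;
};

int main() {
  const size_t kTableSize = 64 * 1024;  // stand-in for GetMaxDeoptTableSize()
  ReservedRegion table(kTableSize);     // mirrors new VirtualMemory(size)
  assert(table.address() != MAP_FAILED);
  // Commit the whole table as executable before generated stubs are copied in.
  assert(table.Commit(table.address(), table.size(), true));
  return 0;
}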
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -44,18 +44,8 @@ DeoptimizerData::DeoptimizerData() {
   eager_deoptimization_entry_code_entries_ = -1;
   lazy_deoptimization_entry_code_entries_ = -1;
   size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
-  MemoryAllocator* allocator = Isolate::Current()->memory_allocator();
-  size_t initial_commit_size = OS::CommitPageSize();
-  eager_deoptimization_entry_code_ =
-      allocator->AllocateChunk(deopt_table_size,
-                               initial_commit_size,
-                               EXECUTABLE,
-                               NULL);
-  lazy_deoptimization_entry_code_ =
-      allocator->AllocateChunk(deopt_table_size,
-                               initial_commit_size,
-                               EXECUTABLE,
-                               NULL);
+  eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+  lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
   current_ = NULL;
   deoptimizing_code_list_ = NULL;
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -65,11 +55,9 @@ DeoptimizerData::DeoptimizerData() {

 DeoptimizerData::~DeoptimizerData() {
-  Isolate::Current()->memory_allocator()->Free(
-      eager_deoptimization_entry_code_);
+  delete eager_deoptimization_entry_code_;
   eager_deoptimization_entry_code_ = NULL;
-  Isolate::Current()->memory_allocator()->Free(
-      lazy_deoptimization_entry_code_);
+  delete lazy_deoptimization_entry_code_;
   lazy_deoptimization_entry_code_ = NULL;

   DeoptimizingCodeListNode* current = deoptimizing_code_list_;
@@ -629,7 +617,7 @@ Address Deoptimizer::GetDeoptimizationEntry(int id,
                                             GetEntryMode mode) {
   ASSERT(id >= 0);
   if (id >= kMaxNumberOfEntries) return NULL;
-  MemoryChunk* base = NULL;
+  VirtualMemory* base = NULL;
   if (mode == ENSURE_ENTRY_CODE) {
     EnsureCodeForDeoptimizationEntry(type, id);
   } else {
@@ -641,27 +629,28 @@ Address Deoptimizer::GetDeoptimizationEntry(int id,
   } else {
     base = data->lazy_deoptimization_entry_code_;
   }
-  return base->area_start() + (id * table_entry_size_);
+  return
+      static_cast<Address>(base->address()) + (id * table_entry_size_);
 }


 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  MemoryChunk* base = NULL;
+  VirtualMemory* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
   } else {
     base = data->lazy_deoptimization_entry_code_;
   }
-  Address start = base->area_start();
+  Address base_casted = reinterpret_cast<Address>(base->address());
   if (base == NULL ||
-      addr < start ||
-      addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
+      addr < base->address() ||
+      addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-      static_cast<int>(addr - start) % table_entry_size_);
-  return static_cast<int>(addr - start) / table_entry_size_;
+      static_cast<int>(addr - base_casted) % table_entry_size_);
+  return static_cast<int>(addr - base_casted) / table_entry_size_;
 }
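Both functions above are stride arithmetic over a dense table of fixed-size entry stubs: entry id maps to base + id * table_entry_size_, and GetDeoptimizationId inverts that by dividing the offset back out. A self-contained sketch of the mapping (the constants are illustrative, not V8's actual values):

#include <cassert>
#include <cstddef>
#include <cstdint>

const int kTableEntrySize = 12;  // stand-in for table_entry_size_
const int kMaxEntries = 16384;   // stand-in for kMaxNumberOfEntries
const int kNotAnEntry = -1;      // stand-in for kNotDeoptimizationEntry

uint8_t* EntryAddress(uint8_t* table_start, int id) {
  assert(id >= 0 && id < kMaxEntries);
  return table_start + id * kTableEntrySize;
}

int EntryId(uint8_t* table_start, uint8_t* addr) {
  ptrdiff_t offset = addr - table_start;
  ptrdiff_t table_bytes =
      static_cast<ptrdiff_t>(kMaxEntries) * kTableEntrySize;
  if (offset < 0 || offset >= table_bytes) return kNotAnEntry;
  assert(offset % kTableEntrySize == 0);  // must sit on an entry boundary
  return static_cast<int>(offset / kTableEntrySize);
}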
@@ -1580,14 +1569,14 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
   CodeDesc desc;
   masm.GetCode(&desc);

-  MemoryChunk* chunk = type == EAGER
+  VirtualMemory* memory = type == EAGER
       ? data->eager_deoptimization_entry_code_
       : data->lazy_deoptimization_entry_code_;
-  ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
-         desc.instr_size);
-  chunk->CommitArea(desc.instr_size);
-  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->area_start(), desc.instr_size);
+  size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
+  ASSERT(static_cast<int>(table_size) >= desc.instr_size);
+  memory->Commit(memory->address(), table_size, true);
+  memcpy(memory->address(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(memory->address(), desc.instr_size);

   if (type == EAGER) {
     data->eager_deoptimization_entry_code_entries_ = entry_count;
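The restored EnsureCodeForDeoptimizationEntry follows the standard sequence for installing generated code: commit memory, copy the instructions in, then flush the instruction cache so no stale icache lines survive. A standalone sketch of that sequence, with POSIX mprotect and the GCC/Clang cache builtin standing in for VirtualMemory::Commit and CPU::FlushICache (an assumption, not V8's implementation):

#include <sys/mman.h>
#include <cstring>

// Sketch only: dest points into an already-reserved, page-aligned region,
// table_size is the maximum table size, code/code_size the assembled stubs.
bool InstallEntryTable(void* dest, size_t table_size,
                       const void* code, size_t code_size) {
  // Commit the whole table read+write+exec, as the restored code commits
  // table_size rather than just the bytes actually generated.
  if (mprotect(dest, table_size, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return false;
  std::memcpy(dest, code, code_size);
  // Data writes are not coherent with the instruction cache on all
  // architectures (e.g. ARM), so flush before executing.
  __builtin___clear_cache(static_cast<char*>(dest),
                          static_cast<char*>(dest) + code_size);
  return true;
}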
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -113,8 +113,8 @@ class DeoptimizerData {
  private:
   int eager_deoptimization_entry_code_entries_;
   int lazy_deoptimization_entry_code_entries_;
-  MemoryChunk* eager_deoptimization_entry_code_;
-  MemoryChunk* lazy_deoptimization_entry_code_;
+  VirtualMemory* eager_deoptimization_entry_code_;
+  VirtualMemory* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;

 #ifdef ENABLE_DEBUGGER_SUPPORT
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1311,7 +1311,6 @@ class Isolate {
   friend class StackGuard;
   friend class ThreadId;
   friend class TestMemoryAllocatorScope;
-  friend class TestCodeRangeScope;
   friend class v8::Isolate;
   friend class v8::Locker;
   friend class v8::Unlocker;
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -206,18 +206,17 @@ void CodeRange::GetNextAllocationBlock(size_t requested) {
 }


-Address CodeRange::AllocateRawMemory(const size_t requested_size,
-                                     const size_t commit_size,
+Address CodeRange::AllocateRawMemory(const size_t requested,
                                      size_t* allocated) {
-  ASSERT(commit_size <= requested_size);
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
-  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
+  if (requested > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
-    GetNextAllocationBlock(requested_size);
+    GetNextAllocationBlock(requested);
   }
   // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
+  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
   if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
@@ -227,10 +226,9 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
-                                               current.start,
-                                               commit_size,
-                                               *allocated)) {
+  if (!MemoryAllocator::CommitCodePage(code_range_,
+                                       current.start,
+                                       *allocated)) {
     *allocated = 0;
     return NULL;
   }
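AllocateRawMemory rounds each request up to MemoryChunk::kAlignment before carving it from the current free block. V8's RoundUp is ordinary power-of-two rounding; a minimal equivalent for reference (assuming, as V8 does, that the alignment is a power of two):

#include <cstddef>

// Round x up to the next multiple of a power-of-two alignment.
inline size_t RoundUpToAlignment(size_t x, size_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}
// e.g. RoundUpToAlignment(5000, 4096) == 8192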
@@ -243,16 +241,6 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
 }


-bool CodeRange::CommitRawMemory(Address start, size_t length) {
-  return code_range_->Commit(start, length, true);
-}
-
-
-bool CodeRange::UncommitRawMemory(Address start, size_t length) {
-  return code_range_->Uncommit(start, length);
-}
-
-
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
@@ -357,31 +345,27 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
   if (!reservation.IsReserved()) return NULL;

   size_ += reservation.size();
-  Address base = static_cast<Address>(reservation.address());
+  Address base = RoundUp(static_cast<Address>(reservation.address()),
+                         alignment);
   controller->TakeControl(&reservation);
   return base;
 }


-Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
-                                               size_t commit_size,
+Address MemoryAllocator::AllocateAlignedMemory(size_t size,
                                                size_t alignment,
                                                Executability executable,
                                                VirtualMemory* controller) {
-  ASSERT(commit_size <= reserve_size);
   VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
+  Address base = ReserveAlignedMemory(size, alignment, &reservation);
   if (base == NULL) return NULL;

   if (executable == EXECUTABLE) {
-    if (!CommitExecutableMemory(&reservation,
-                                base,
-                                commit_size,
-                                reserve_size)) {
+    if (!CommitCodePage(&reservation, base, size)) {
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, commit_size, false)) {
+    if (!reservation.Commit(base, size, false)) {
       base = NULL;
     }
   }
@@ -485,53 +469,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
 }


-// Commit MemoryChunk area to the requested size.
-bool MemoryChunk::CommitArea(size_t requested) {
-  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
-                      MemoryAllocator::CodePageGuardSize() : 0;
-  size_t header_size = area_start() - address() - guard_size;
-  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
-  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
-                                  OS::CommitPageSize());
-
-  if (commit_size > committed_size) {
-    // Commit size should be less or equal than the reserved size.
-    ASSERT(commit_size <= size() - 2 * guard_size);
-    // Append the committed area.
-    Address start = address() + committed_size + guard_size;
-    size_t length = commit_size - committed_size;
-    if (reservation_.IsReserved()) {
-      if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
-        return false;
-      }
-    } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
-      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
-      if (!code_range->CommitRawMemory(start, length)) return false;
-    }
-
-    if (Heap::ShouldZapGarbage()) {
-      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
-    }
-  } else if (commit_size < committed_size) {
-    ASSERT(commit_size > 0);
-    // Shrink the committed area.
-    size_t length = committed_size - commit_size;
-    Address start = address() + committed_size + guard_size - length;
-    if (reservation_.IsReserved()) {
-      if (!reservation_.Uncommit(start, length)) return false;
-    } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
-      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
-      if (!code_range->UncommitRawMemory(start, length)) return false;
-    }
-  }
-
-  area_end_ = area_start_ + requested;
-  return true;
-}
-
-
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
   next_chunk_ = other->next_chunk_;
   prev_chunk_ = other;
@@ -552,12 +489,9 @@ void MemoryChunk::Unlink() {
 }


-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
-                                            intptr_t commit_area_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
                                             Executability executable,
                                             Space* owner) {
-  ASSERT(commit_area_size <= reserve_area_size);
-
   size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
@@ -565,38 +499,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
   Address area_start = NULL;
   Address area_end = NULL;

-  //
-  // MemoryChunk layout:
-  //
-  //             Executable
-  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
-  // |           Header           |
-  // +----------------------------+<- base + CodePageGuardStartOffset
-  // |           Guard            |
-  // +----------------------------+<- area_start_
-  // |           Area             |
-  // +----------------------------+<- area_end_ (area_start + commit_area_size)
-  // | Committed but not used     |
-  // +----------------------------+<- aligned at OS page boundary
-  // | Reserved but not committed |
-  // +----------------------------+<- aligned at OS page boundary
-  // |           Guard            |
-  // +----------------------------+<- base + chunk_size
-  //
-  //           Non-executable
-  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
-  // |          Header            |
-  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
-  // |           Area             |
-  // +----------------------------+<- area_end_ (area_start + commit_area_size)
-  // | Committed but not used     |
-  // +----------------------------+<- aligned at OS page boundary
-  // | Reserved but not committed |
-  // +----------------------------+<- base + chunk_size
-  //
-
   if (executable == EXECUTABLE) {
-    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
+    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
                          OS::CommitPageSize()) + CodePageGuardSize();

     // Check executable memory limit.
@@ -607,15 +511,10 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
       return NULL;
     }

-    // Size of header (not executable) plus area (executable).
-    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
-                                 OS::CommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
-                                                       commit_size,
-                                                       &chunk_size);
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
@@ -624,7 +523,6 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
       size_executable_ += chunk_size;
     } else {
       base = AllocateAlignedMemory(chunk_size,
-                                   commit_size,
                                    MemoryChunk::kAlignment,
                                    executable,
                                    &reservation);
@@ -635,18 +533,14 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,

     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset());
-      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
+      ZapBlock(base + CodePageAreaStartOffset(), body_size);
     }

     area_start = base + CodePageAreaStartOffset();
-    area_end = area_start + commit_area_size;
+    area_end = area_start + body_size;
   } else {
-    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                         OS::CommitPageSize());
-    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
-                                 commit_area_size, OS::CommitPageSize());
+    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
     base = AllocateAlignedMemory(chunk_size,
-                                 commit_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);
@@ -654,15 +548,13 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,

     if (base == NULL) return NULL;

     if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
+      ZapBlock(base, chunk_size);
     }

     area_start = base + Page::kObjectStartOffset;
-    area_end = area_start + commit_area_size;
+    area_end = base + chunk_size;
   }

-  // Use chunk_size for statistics and callbacks because we assume that they
-  // treat reserved but not-yet committed memory regions of chunks as allocated.
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));
@@ -687,7 +579,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,

 Page* MemoryAllocator::AllocatePage(intptr_t size,
                                     PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(size, executable, owner);

   if (chunk == NULL) return NULL;
@@ -698,10 +590,7 @@ Page* MemoryAllocator::AllocatePage(intptr_t size,
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Space* owner,
                                               Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(object_size,
-                                     object_size,
-                                     executable,
-                                     owner);
+  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }
@@ -843,10 +732,9 @@ int MemoryAllocator::CodePageAreaEndOffset() {
 }


-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
-                                             Address start,
-                                             size_t commit_size,
-                                             size_t reserved_size) {
+bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
+                                     Address start,
+                                     size_t size) {
   // Commit page header (not executable).
   if (!vm->Commit(start,
                   CodePageGuardStartOffset(),
@@ -860,14 +748,15 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
   }

   // Commit page body (executable).
+  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
   if (!vm->Commit(start + CodePageAreaStartOffset(),
-                  commit_size - CodePageGuardStartOffset(),
+                  area_size,
                   true)) {
     return false;
   }

-  // Create guard page before the end.
-  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
+  // Create guard page after the allocatable area.
+  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
     return false;
   }
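As the restored CommitCodePage shows, an executable page is carved into four regions: a non-executable header, a leading guard page, the executable body, and a trailing guard page, so overruns at either end of the code area fault immediately. A sketch of how the offsets compose, assuming a 4 KiB page and a header that occupies one full page (V8 derives these from CodePageGuardStartOffset() and friends; the concrete numbers here are assumptions):

#include <cstddef>

const size_t kPageSize = 4096;         // assumed OS::CommitPageSize()
const size_t kHeaderSize = kPageSize;  // assumed header size, one full page

size_t GuardStartOffset() { return kHeaderSize; }             // leading guard
size_t AreaStartOffset() { return kHeaderSize + kPageSize; }  // body start

// Executable bytes in a reservation of `size` bytes: everything between
// the leading guard page and the trailing guard page.
size_t AreaSize(size_t size) {
  return size - AreaStartOffset() - kPageSize;
}
// e.g. AreaSize(16 * kPageSize) == 13 * kPageSize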
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -645,7 +645,6 @@ class MemoryChunk {
   int area_size() {
     return static_cast<int>(area_end() - area_start());
   }
-  bool CommitArea(size_t requested);

   // Approximate amount of physical memory committed for this chunk.
   size_t CommittedPhysicalMemory() {
@@ -888,11 +887,8 @@ class CodeRange {
   // Allocates a chunk of memory from the large-object portion of
   // the code range.  On platforms with no separate code range, should
   // not be called.
-  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
-                                            const size_t commit_size,
+  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
                                             size_t* allocated);
-  bool CommitRawMemory(Address start, size_t length);
-  bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);

  private:
@@ -1040,19 +1036,14 @@ class MemoryAllocator {
   void ReportStatistics();
 #endif

-  // Returns a MemoryChunk in which the memory region from commit_area_size to
-  // reserve_area_size of the chunk area is reserved but not committed, it
-  // could be committed later by calling MemoryChunk::CommitArea.
-  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
-                             intptr_t commit_area_size,
+  MemoryChunk* AllocateChunk(intptr_t body_size,
                              Executability executable,
                              Space* space);

   Address ReserveAlignedMemory(size_t requested,
                                size_t alignment,
                                VirtualMemory* controller);
-  Address AllocateAlignedMemory(size_t reserve_size,
-                                size_t commit_size,
+  Address AllocateAlignedMemory(size_t requested,
                                 size_t alignment,
                                 Executability executable,
                                 VirtualMemory* controller);
@@ -1102,10 +1093,9 @@ class MemoryAllocator {
     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
   }

-  MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
-                                                     Address start,
-                                                     size_t commit_size,
-                                                     size_t reserved_size);
+  MUST_USE_RESULT static bool CommitCodePage(VirtualMemory* vm,
+                                             Address start,
+                                             size_t size);

  private:
   Isolate* isolate_;
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -204,9 +204,7 @@ TEST(CodeRange) {
         (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
         Pseudorandom() % 5000 + 1;
     size_t allocated = 0;
-    Address base = code_range->AllocateRawMemory(requested,
-                                                 requested,
-                                                 &allocated);
+    Address base = code_range->AllocateRawMemory(requested, &allocated);
     CHECK(base != NULL);
     blocks.Add(Block(base, static_cast<int>(allocated)));
     current_allocated += static_cast<int>(allocated);
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -121,148 +121,9 @@ class TestMemoryAllocatorScope {
   DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
 };

-
-// Temporarily sets a given code range in an isolate.
-class TestCodeRangeScope {
- public:
-  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
-      : isolate_(isolate),
-        old_code_range_(isolate->code_range_) {
-    isolate->code_range_ = code_range;
-  }
-
-  ~TestCodeRangeScope() {
-    isolate_->code_range_ = old_code_range_;
-  }
-
- private:
-  Isolate* isolate_;
-  CodeRange* old_code_range_;
-
-  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
-};
-
 } }  // namespace v8::internal

-
-static void VerifyMemoryChunk(Isolate* isolate,
-                              Heap* heap,
-                              CodeRange* code_range,
-                              size_t reserve_area_size,
-                              size_t commit_area_size,
-                              size_t second_commit_area_size,
-                              Executability executable) {
-  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
-  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
-                                heap->MaxExecutableSize()));
-  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
-  TestCodeRangeScope test_code_range_scope(isolate, code_range);
-
-  size_t header_size = (executable == EXECUTABLE)
-                       ? MemoryAllocator::CodePageGuardStartOffset()
-                       : MemoryChunk::kObjectStartOffset;
-  size_t guard_size = (executable == EXECUTABLE)
-                       ? MemoryAllocator::CodePageGuardSize()
-                       : 0;
-
-  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
-                                                              commit_area_size,
-                                                              executable,
-                                                              NULL);
-  size_t alignment = code_range->exists() ?
-                     MemoryChunk::kAlignment : OS::CommitPageSize();
-  size_t reserved_size = ((executable == EXECUTABLE))
-      ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
-                alignment)
-      : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
-  CHECK(memory_chunk->size() == reserved_size);
-  CHECK(memory_chunk->area_start() < memory_chunk->address() +
-        memory_chunk->size());
-  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
-        memory_chunk->size());
-  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
-
-  Address area_start = memory_chunk->area_start();
-
-  memory_chunk->CommitArea(second_commit_area_size);
-  CHECK(area_start == memory_chunk->area_start());
-  CHECK(memory_chunk->area_start() < memory_chunk->address() +
-        memory_chunk->size());
-  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
-        memory_chunk->size());
-  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
-        second_commit_area_size);
-
-  memory_allocator->Free(memory_chunk);
-  memory_allocator->TearDown();
-  delete memory_allocator;
-}
-
-
-static unsigned int Pseudorandom() {
-  static uint32_t lo = 2345;
-  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
-  return lo & 0xFFFFF;
-}
-
-
-TEST(MemoryChunk) {
-  OS::SetUp();
-  Isolate* isolate = Isolate::Current();
-  isolate->InitializeLoggingAndCounters();
-  Heap* heap = isolate->heap();
-  CHECK(heap->ConfigureHeapDefault());
-
-  size_t reserve_area_size = 1 * MB;
-  size_t initial_commit_area_size, second_commit_area_size;
-
-  for (int i = 0; i < 100; i++) {
-    initial_commit_area_size = Pseudorandom();
-    second_commit_area_size = Pseudorandom();
-
-    // With CodeRange.
-    CodeRange* code_range = new CodeRange(isolate);
-    const int code_range_size = 32 * MB;
-    if (!code_range->SetUp(code_range_size)) return;
-
-    VerifyMemoryChunk(isolate,
-                      heap,
-                      code_range,
-                      reserve_area_size,
-                      initial_commit_area_size,
-                      second_commit_area_size,
-                      EXECUTABLE);
-
-    VerifyMemoryChunk(isolate,
-                      heap,
-                      code_range,
-                      reserve_area_size,
-                      initial_commit_area_size,
-                      second_commit_area_size,
-                      NOT_EXECUTABLE);
-    delete code_range;
-
-    // Without CodeRange.
-    code_range = NULL;
-    VerifyMemoryChunk(isolate,
-                      heap,
-                      code_range,
-                      reserve_area_size,
-                      initial_commit_area_size,
-                      second_commit_area_size,
-                      EXECUTABLE);
-
-    VerifyMemoryChunk(isolate,
-                      heap,
-                      code_range,
-                      reserve_area_size,
-                      initial_commit_area_size,
-                      second_commit_area_size,
-                      NOT_EXECUTABLE);
-  }
-}
-
-
 TEST(MemoryAllocator) {
   OS::SetUp();
   Isolate* isolate = Isolate::Current();