Commit 9212be86 authored by machenbach, committed by Commit bot

Revert of [turbofan] Run everything after representation selection concurrently. (patchset #2 id:20001 of https://codereview.chromium.org/1926023002/ )

Reason for revert:
[Sheriff] Flaky crashes here and there:
https://build.chromium.org/p/client.v8/builders/V8%20Linux/builds/9867
https://build.chromium.org/p/client.v8/builders/V8%20Linux64/builds/9589
https://build.chromium.org/p/client.v8/builders/V8%20Mac/builds/7679

Original issue's description:
> [turbofan] Run everything after representation selection concurrently.
>
> Further refactor the pipeline to even run the first scheduler (part of
> the effect control linearization) concurrently. This temporarily
> disables most of the write barrier elimination, but we will get back to
> that later.

TBR=mstarzinger@chromium.org,bmeurer@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review-Url: https://codereview.chromium.org/1925073002
Cr-Commit-Position: refs/heads/master@{#35863}
parent 5595d357
......@@ -2661,44 +2661,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (tagged)
// -- cp : context
// -----------------------------------
__ AssertSmi(r1);
Label runtime;
__ SmiUntag(r1);
__ Allocate(r1, r0, r2, r3, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(r1);
__ Push(r1);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (tagged)
// -- cp : context
// -----------------------------------
__ AssertSmi(r1);
Label runtime;
__ SmiUntag(r1);
__ Allocate(r1, r0, r2, r3, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(r1);
__ Push(r1);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -249,7 +249,7 @@ SIMD128_TYPES(SIMD128_ALLOC_DESC)
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -2751,46 +2751,6 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
// ----------- S t a t e -------------
// -- x1 : requested object size (tagged)
// -- cp : context
// -----------------------------------
__ AssertSmi(x1);
Label runtime;
__ SmiUntag(x1);
__ Allocate(x1, x0, x2, x3, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ Bind(&runtime);
__ SmiTag(x1);
__ Push(x1);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInOldSpace");
// ----------- S t a t e -------------
// -- x1 : requested object size (tagged)
// -- cp : context
// -----------------------------------
__ AssertSmi(x1);
Label runtime;
__ SmiUntag(x1);
__ Allocate(x1, x0, x2, x3, &runtime, PRETENURE);
__ Ret();
__ Bind(&runtime);
__ SmiTag(x1);
__ Push(x1);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
......
......@@ -274,7 +274,7 @@ SIMD128_TYPES(SIMD128_ALLOC_DESC)
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1};
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -172,9 +172,6 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
V(AllocateInNewSpace, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(AllocateInOldSpace, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(ConstructedNonConstructable, BUILTIN, UNINITIALIZED, kNoExtraICState) \
......@@ -442,8 +439,6 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_AllocateInNewSpace(MacroAssembler* masm);
static void Generate_AllocateInOldSpace(MacroAssembler* masm);
static void Generate_ConstructedNonConstructable(MacroAssembler* masm);
static void Generate_CompileLazy(MacroAssembler* masm);
static void Generate_CompileBaseline(MacroAssembler* masm);
......
......@@ -516,10 +516,8 @@ SIMD128_TYPES(SIMD128_ALLOC)
// static
Callable CodeFactory::Allocate(Isolate* isolate, PretenureFlag pretenure_flag) {
return Callable(pretenure_flag == NOT_TENURED
? isolate->builtins()->AllocateInNewSpace()
: isolate->builtins()->AllocateInOldSpace(),
AllocateDescriptor(isolate));
AllocateStub stub(isolate, pretenure_flag);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
......
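For orientation, the hunk above swaps the builtin-based Callable (the removed form, introduced by the CL being reverted) for the restored Hydrogen-stub-based form. A minimal usage sketch of the restored entry point, assuming Callable's code() and descriptor() accessors and a hypothetical call site:

    // Hypothetical call site: obtain allocation code for a pretenured allocation.
    Callable allocate = CodeFactory::Allocate(isolate, TENURED);
    Handle<Code> code = allocate.code();                          // AllocateStub code object
    CallInterfaceDescriptor descriptor = allocate.descriptor();   // single register: requested object size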
......@@ -1289,6 +1289,16 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<AllocateStub>::BuildCodeStub() {
HValue* result =
Add<HAllocate>(GetParameter(0), HType::Tagged(),
casted_stub()->pretenure_flag(), JS_OBJECT_TYPE);
return result;
}
Handle<Code> AllocateStub::GenerateCode() { return DoGenerateCode(this); }
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
ElementsKind kind,
AllocationSiteOverrideMode override_mode,
......
......@@ -4006,6 +4006,10 @@ void AllocateMutableHeapNumberStub::InitializeDescriptor(
SIMD128_TYPES(SIMD128_INIT_DESC)
#undef SIMD128_INIT_DESC
void AllocateStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize();
}
void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
descriptor->SetMissHandler(ExternalReference(
......
......@@ -57,6 +57,7 @@ namespace internal {
V(VectorStoreIC) \
V(VectorKeyedStoreIC) \
/* HydrogenCodeStubs */ \
V(Allocate) \
V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
......@@ -2749,6 +2750,23 @@ class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
SIMD128_TYPES(SIMD128_ALLOC_STUB)
#undef SIMD128_ALLOC_STUB
class AllocateStub final : public HydrogenCodeStub {
public:
AllocateStub(Isolate* isolate, PretenureFlag pretenure_flag)
: HydrogenCodeStub(isolate) {
set_sub_minor_key(PretenureFlagBits::encode(pretenure_flag));
}
PretenureFlag pretenure_flag() const {
return PretenureFlagBits::decode(sub_minor_key());
}
private:
typedef BitField<PretenureFlag, 0, 1> PretenureFlagBits;
DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate);
DEFINE_HYDROGEN_CODE_STUB(Allocate, HydrogenCodeStub);
};
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
ArrayConstructorStubBase(Isolate* isolate,
......
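The restored AllocateStub keeps its PretenureFlag in a one-bit BitField of the stub's sub-minor key. A simplified sketch of that packing pattern (not V8's actual BitField template; assumes NOT_TENURED == 0 and TENURED == 1):

    #include <stdint.h>

    // Simplified stand-in for BitField<PretenureFlag, 0, 1>.
    template <class T, int kShift, int kSize>
    struct BitFieldSketch {
      static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) { return static_cast<uint32_t>(value) << kShift; }
      static T decode(uint32_t key) { return static_cast<T>((key & kMask) >> kShift); }
    };
    typedef BitFieldSketch<PretenureFlag, 0, 1> PretenureFlagBitsSketch;
    // PretenureFlagBitsSketch::encode(TENURED) == 1; decode(1) == TENURED.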
......@@ -27,24 +27,38 @@ class ChangeLowering final : public Reducer {
Reduction Reduce(Node* node) final;
private:
Node* HeapNumberValueIndexConstant();
Node* SmiShiftBitsConstant();
Node* ChangeInt32ToFloat64(Node* value);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeSmiToFloat64(Node* value);
Node* ChangeSmiToWord32(Node* value);
Node* ChangeUint32ToFloat64(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* LoadHeapNumberValue(Node* value, Node* control);
Node* TestNotSmi(Node* value);
Reduction ReduceChangeBitToBool(Node* value, Node* control);
Reduction ReduceChangeBoolToBit(Node* value);
Reduction ReduceChangeInt31ToTagged(Node* value, Node* control);
Reduction ReduceChangeTaggedSignedToInt32(Node* value);
Reduction ChangeBitToBool(Node* value, Node* control);
Reduction ChangeBoolToBit(Node* value);
Reduction ChangeFloat64ToTagged(Node* value, Node* control);
Reduction ChangeInt31ToTagged(Node* value, Node* control);
Reduction ChangeInt32ToTagged(Node* value, Node* control);
Reduction ChangeTaggedSignedToInt32(Node* value);
Reduction ChangeUint32ToTagged(Node* value, Node* control);
Reduction ReduceLoadField(Node* node);
Reduction ReduceStoreField(Node* node);
Reduction ReduceLoadElement(Node* node);
Reduction ReduceStoreElement(Node* node);
Reduction ReduceAllocate(Node* node);
Reduction LoadField(Node* node);
Reduction StoreField(Node* node);
Reduction LoadElement(Node* node);
Reduction StoreElement(Node* node);
Reduction Allocate(Node* node);
Reduction ReduceObjectIsSmi(Node* node);
Node* IsSmi(Node* value);
Node* LoadHeapObjectMap(Node* object, Node* control);
Node* LoadMapBitField(Node* map);
Node* LoadMapInstanceType(Node* map);
Reduction ObjectIsSmi(Node* node);
Node* ComputeIndex(const ElementAccess& access, Node* const key);
Graph* graph() const;
......
......@@ -17,7 +17,7 @@ Node** CommonNodeCache::FindExternalConstant(ExternalReference value) {
Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.location()));
}
......
......@@ -513,16 +513,6 @@ Constant::Constant(RelocatablePtrConstantInfo info)
}
#endif
Handle<HeapObject> Constant::ToHeapObject() const {
DCHECK_EQ(kHeapObject, type());
Handle<HeapObject> value(
bit_cast<HeapObject**>(static_cast<intptr_t>(value_)));
if (value->IsConsString()) {
value = String::Flatten(Handle<String>::cast(value), TENURED);
}
return value;
}
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
case Constant::kInt32:
......
......@@ -1000,7 +1000,10 @@ class Constant final {
return RpoNumber::FromInt(static_cast<int>(value_));
}
Handle<HeapObject> ToHeapObject() const;
Handle<HeapObject> ToHeapObject() const {
DCHECK_EQ(kHeapObject, type());
return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
}
private:
Type type_;
......
......@@ -81,6 +81,9 @@ Node* JSGraph::NaNConstant() {
Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
if (value->IsConsString()) {
value = String::Flatten(Handle<String>::cast(value), TENURED);
}
Node** loc = cache_.FindHeapConstant(value);
if (*loc == nullptr) {
*loc = graph()->NewNode(common()->HeapConstant(value));
......
......@@ -1302,11 +1302,13 @@ bool Pipeline::CreateGraph() {
}
// Type the graph.
Typer typer(isolate(), data->graph(), info()->is_deoptimization_enabled()
? Typer::kDeoptimizationEnabled
: Typer::kNoFlags,
info()->dependencies());
Run<TyperPhase>(&typer);
base::SmartPointer<Typer> typer;
typer.Reset(new Typer(isolate(), data->graph(),
info()->is_deoptimization_enabled()
? Typer::kDeoptimizationEnabled
: Typer::kNoFlags,
info()->dependencies()));
Run<TyperPhase>(typer.get());
RunPrintAndVerify("Typed");
BeginPhaseKind("lowering");
......@@ -1333,26 +1335,16 @@ bool Pipeline::CreateGraph() {
Run<EarlyOptimizationPhase>();
RunPrintAndVerify("Early optimized");
EndPhaseKind();
return true;
}
bool Pipeline::OptimizeGraph(Linkage* linkage) {
PipelineData* data = this->data_;
BeginPhaseKind("block building");
Run<EffectControlLinearizationPhase>();
RunPrintAndVerify("Effect and control linearized", true);
RunPrintAndVerify("Effect and control linearized");
Run<BranchEliminationPhase>();
RunPrintAndVerify("Branch conditions eliminated", true);
RunPrintAndVerify("Branch conditions eliminated");
// Optimize control flow.
if (FLAG_turbo_cf_optimization) {
Run<ControlFlowOptimizationPhase>();
RunPrintAndVerify("Control flow optimized", true);
RunPrintAndVerify("Control flow optimized");
}
// Lower changes that have been inserted before.
......@@ -1360,6 +1352,19 @@ bool Pipeline::OptimizeGraph(Linkage* linkage) {
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late optimized", true);
// Kill the Typer and thereby uninstall the decorator (if any).
typer.Reset(nullptr);
EndPhaseKind();
return true;
}
bool Pipeline::OptimizeGraph(Linkage* linkage) {
PipelineData* data = this->data_;
BeginPhaseKind("block building");
Run<LateGraphTrimmingPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late trimmed", true);
......
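The pipeline hunk above moves effect/control linearization, branch elimination and control-flow optimization back into the main-thread graph-creation step, and keeps the Typer alive across those phases in a smart pointer so its destructor can uninstall the graph decorator before block building (see the "Kill the Typer" comment). A minimal sketch of that lifetime pattern, using std::unique_ptr in place of base::SmartPointer and hypothetical stand-in types:

    #include <memory>

    struct Graph;  // stand-in for compiler::Graph

    class TyperSketch {
     public:
      explicit TyperSketch(Graph* graph) { /* install type decorator on the graph */ }
      ~TyperSketch() { /* uninstall the decorator */ }
    };

    void CreateGraphSketch(Graph* graph) {
      std::unique_ptr<TyperSketch> typer(new TyperSketch(graph));
      // ... typing, lowering and effect/control linearization run with the decorator installed ...
      typer.reset();  // destroy the Typer, uninstalling the decorator
      // ... remaining main-thread phases; concurrent phases start later ...
    }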
......@@ -43,10 +43,6 @@ class HandleBase {
V8_INLINE bool is_null() const { return location_ == nullptr; }
// Returns the raw address where this handle is stored. This should only be
// used for hashing handles; do not ever try to dereference it.
V8_INLINE Address address() const { return bit_cast<Address>(location_); }
protected:
// Provides the C++ dereference operator.
V8_INLINE Object* operator*() const {
......@@ -136,14 +132,14 @@ class Handle final : public HandleBase {
// Provide function object for location equality comparison.
struct equal_to : public std::binary_function<Handle<T>, Handle<T>, bool> {
V8_INLINE bool operator()(Handle<T> lhs, Handle<T> rhs) const {
return lhs.address() == rhs.address();
return lhs.location() == rhs.location();
}
};
// Provide function object for location hashing.
struct hash : public std::unary_function<Handle<T>, size_t> {
V8_INLINE size_t operator()(Handle<T> const& handle) const {
return base::hash<void*>()(handle.address());
return base::hash<void*>()(handle.location());
}
};
......
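With address() removed, handle identity for hashing and equality again comes from location(), the slot that holds the object pointer. A short usage sketch of the restored functors (hypothetical; requires src/handles.h):

    #include <unordered_set>

    // Hypothetical: deduplicate handles by the slot they point at.
    std::unordered_set<Handle<Object>, Handle<Object>::hash, Handle<Object>::equal_to> seen;
    seen.insert(some_handle);                         // hashes some_handle.location()
    bool already_seen = seen.count(some_handle) > 0;  // equal iff both handles use the same slot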
......@@ -2618,48 +2618,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : requested object size (tagged)
// -- esi : context
// -----------------------------------
__ AssertSmi(edx);
Label runtime;
__ SmiUntag(edx);
__ Allocate(edx, eax, ecx, edi, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(edx);
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ PushReturnAddressFrom(ecx);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : requested object size (tagged)
// -- esi : context
// -----------------------------------
__ AssertSmi(edx);
Label runtime;
__ SmiUntag(edx);
__ Allocate(edx, eax, ecx, edi, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(edx);
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ PushReturnAddressFrom(ecx);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -253,7 +253,7 @@ SIMD128_TYPES(SIMD128_ALLOC_DESC)
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx};
Register registers[] = {eax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -2736,44 +2736,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (tagged)
// -- cp : context
// -----------------------------------
__ AssertSmi(a0);
Label runtime;
__ SmiUntag(a0);
__ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Push(a0);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (tagged)
// -- cp : context
// -----------------------------------
__ AssertSmi(a0);
Label runtime;
__ SmiUntag(a0);
__ Allocate(a0, v0, a1, a2, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Push(a0);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
......
......@@ -2724,44 +2724,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (tagged)
// -- cp : context
// -----------------------------------
__ AssertSmi(a0);
Label runtime;
__ SmiUntag(a0);
__ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Push(a0);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (tagged)
// -- cp : context
// -----------------------------------
__ AssertSmi(a0);
Label runtime;
__ SmiUntag(a0);
__ Allocate(a0, v0, a1, a2, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Push(a0);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
......
......@@ -2042,48 +2042,6 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ PushReturnAddressFrom(rcx);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdx : requested object size (tagged)
// -- rsi : context
// -----------------------------------
__ AssertSmi(rdx);
Label runtime;
__ SmiToInteger64(rdx, rdx);
__ Allocate(rdx, rax, rcx, rdi, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ Integer32ToSmi(rdx, rdx);
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdx : requested object size (tagged)
// -- rsi : context
// -----------------------------------
__ AssertSmi(rdx);
Label runtime;
__ SmiToInteger64(rdx, rdx);
__ Allocate(rdx, rax, rcx, rdi, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ Integer32ToSmi(rdx, rdx);
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ PushReturnAddressFrom(rcx);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -244,7 +244,7 @@ SIMD128_TYPES(SIMD128_ALLOC_DESC)
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx};
Register registers[] = {rax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -1370,7 +1370,7 @@ TEST(LowerStoreField_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p1, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
}
......@@ -1435,7 +1435,7 @@ TEST(LowerStoreElement_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p2, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
}
......
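These test expectations flip back to kNoWriteBarrier because the revert restores the write-barrier elimination that the original CL had temporarily disabled (see the CL description above). As a generic illustration of the kind of criterion such elimination relies on (not V8's actual predicate), a store can skip the barrier when the stored value cannot be a heap pointer or the receiving object is known to live in new space:

    // Simplified, hypothetical write-barrier selection.
    enum WriteBarrierKindSketch { kNoBarrier, kFullBarrier };

    WriteBarrierKindSketch BarrierFor(bool value_is_smi, bool object_in_new_space) {
      if (value_is_smi || object_in_new_space) return kNoBarrier;  // no old-to-new pointer is created
      return kFullBarrier;
    }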