Commit 034b89cc authored by kasperl@chromium.org

Refactor the smi case inlining for binary operations, so it's easier
to inline the code on demand. Right now, we still only inline the smi
case code for bitwise operations.
Review URL: http://codereview.chromium.org/7669

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@547 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ca37dab9
@@ -428,12 +428,24 @@ void Assembler::pop(Register dst) {
// relocation information generated between the last instruction and this
// pop instruction.
byte instr = last_pc_[0];
if (instr == (0x50 | dst.code())) {
pc_ = last_pc_;
last_pc_ = NULL;
if (FLAG_print_push_pop_elimination) {
PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
if ((instr & ~0x7) == 0x50) {
int push_reg_code = instr & 0x7;
if (push_reg_code == dst.code()) {
pc_ = last_pc_;
if (FLAG_print_push_pop_elimination) {
PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
}
} else {
// Convert 'push src; pop dst' to 'mov dst, src'.
last_pc_[0] = 0x8b;
Register src = { push_reg_code };
EnsureSpace ensure_space(this);
emit_operand(dst, Operand(src));
if (FLAG_print_push_pop_elimination) {
PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
}
}
last_pc_ = NULL;
return;
} else if (instr == 0xff) { // push of an operand, convert to a move
byte op1 = last_pc_[1];
@@ -2043,6 +2055,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
reloc_info_writer.Write(&rinfo);
}
void Assembler::WriteInternalReference(int position, const Label& bound_label) {
ASSERT(bound_label.is_bound());
ASSERT(0 <= position);
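The rewrite above leans on the IA-32 encoding: a one-byte push of a register is 0x50|reg, a pop is 0x58|reg, and 'mov r32, r/m32' is 0x8b followed by a ModR/M byte. A minimal standalone sketch of the peephole (hypothetical helper, not V8's Assembler API; it assumes code points at a buffer with one spare byte past len):

#include <cstdint>
#include <cstddef>

// Rewrites 'push src; pop dst' in place. Returns the new code length:
// the pair collapses to nothing when src == dst, or to the two-byte
// 'mov dst, src' (0x8b, then ModR/M 0xC0 | dst << 3 | src) otherwise.
size_t eliminate_push_pop(uint8_t* code, size_t len, int pop_dst) {
  if (len < 1 || (code[len - 1] & ~0x7) != 0x50) return len;  // no push
  int push_src = code[len - 1] & 0x7;
  if (push_src == pop_dst) return len - 1;       // push x; pop x -> no-op
  code[len - 1] = 0x8b;                          // mov r32, r/m32
  code[len] = 0xC0 | (pop_dst << 3) | push_src;  // reg-to-reg ModR/M
  return len + 1;
}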
@@ -520,8 +520,8 @@ void CodeGenerator::LoadGlobal() {
void CodeGenerator::LoadGlobalReceiver(Register scratch) {
__ mov(scratch, ContextOperand(esi, Context::GLOBAL_INDEX));
__ push(FieldOperand(scratch, GlobalObject::kGlobalReceiverOffset));
__ mov(scratch, GlobalObject());
frame_->Push(FieldOperand(scratch, GlobalObject::kGlobalReceiverOffset));
}
@@ -692,34 +692,50 @@ class FloatingPointHelper : public AllStatic {
};
// Flag that indicates whether or not the code for dealing with smis
// is inlined or should be dealt with in the stub.
enum GenericBinaryFlags {
SMI_CODE_IN_STUB,
SMI_CODE_INLINED
};
class GenericBinaryOpStub: public CodeStub {
public:
GenericBinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op), mode_(mode) { }
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags)
: op_(op), mode_(mode), flags_(flags) { }
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
private:
Token::Value op_;
OverwriteMode mode_;
GenericBinaryFlags flags_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("GenericBinaryOpStub (op %s), (mode %d)\n",
PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
Token::String(op_),
static_cast<int>(mode_));
static_cast<int>(mode_),
static_cast<int>(flags_));
}
#endif
// Minor key encoding in 16 bits OOOOOOOOOOOOOOMM.
// Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 14> {};
class OpBits: public BitField<Token::Value, 2, 13> {};
class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return OpBits::encode(op_) |
ModeBits::encode(mode_);
ModeBits::encode(mode_) |
FlagBits::encode(flags_);
}
void Generate(MacroAssembler* masm);
};
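The minor-key layout above (FOOOOOOOOOOOOOMM: two mode bits, thirteen op bits, one flag bit) relies on V8's BitField template. A compilable sketch of the idea (field positions taken from the declarations above; the template body here is an assumption, not the exact V8 source):

#include <cstdint>

template <typename T, int shift, int size>
class BitField {
 public:
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// With ModeBits = BitField<OverwriteMode, 0, 2>, OpBits = BitField<Token::Value, 2, 13>
// and FlagBits = BitField<GenericBinaryFlags, 15, 1>, the encode() results occupy
// disjoint bit ranges, so or'ing them yields a unique 16-bit minor key.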
@@ -742,115 +758,84 @@ const char* GenericBinaryOpStub::GetName() {
}
class DeferredInlineBinaryOperation: public DeferredCode {
public:
DeferredInlineBinaryOperation(CodeGenerator* generator,
Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags)
: DeferredCode(generator), stub_(op, mode, flags) { }
void GenerateInlineCode() {
stub_.GenerateSmiCode(masm(), enter());
}
virtual void Generate() {
__ push(ebx);
__ CallStub(&stub_);
// We must preserve the eax value here, because it will be written
// to the top-of-stack element when getting back to the fast case
// code. See comment in GenericBinaryOperation where
// deferred->exit() is bound.
__ push(eax);
}
private:
GenericBinaryOpStub stub_;
};
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
Comment cmnt_token(masm_, Token::String(op));
if (op == Token::COMMA) {
// Simply discard left value.
frame_->Pop(eax);
frame_->Pop();
frame_->Push(eax);
return;
}
// For now, we keep the old behavior and only inline the smi code
// for the bitwise operations.
GenericBinaryFlags flags;
switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD: {
GenericBinaryOpStub stub(op, overwrite_mode);
__ CallStub(&stub);
frame_->Push(eax);
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR: {
Label slow, exit;
frame_->Pop(eax); // get y
frame_->Pop(edx); // get x
__ mov(ecx, Operand(edx)); // Prepare smi check.
// tag check
__ or_(ecx, Operand(eax)); // ecx = x | y;
ASSERT(kSmiTag == 0); // adjust code below
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow, taken);
switch (op) {
case Token::BIT_OR: __ or_(eax, Operand(edx)); break;
case Token::BIT_AND: __ and_(eax, Operand(edx)); break;
case Token::BIT_XOR: __ xor_(eax, Operand(edx)); break;
default: UNREACHABLE();
}
__ jmp(&exit);
__ bind(&slow);
frame_->Push(edx); // restore stack slots
frame_->Push(eax);
GenericBinaryOpStub stub(op, overwrite_mode);
__ CallStub(&stub);
__ bind(&exit);
frame_->Push(eax); // push the result to the stack
break;
}
case Token::BIT_XOR:
case Token::SHL:
case Token::SHR:
case Token::SAR: {
Label slow, exit;
frame_->Pop(edx); // get y
frame_->Pop(eax); // get x
// tag check
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax)); // ecx = x | y;
ASSERT(kSmiTag == 0); // adjust code below
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow, not_taken);
// get copies of operands
__ mov(ebx, Operand(eax));
__ mov(ecx, Operand(edx));
// remove tags from operands (but keep sign)
__ sar(ebx, kSmiTagSize);
__ sar(ecx, kSmiTagSize);
// perform operation
switch (op) {
case Token::SAR:
__ sar(ebx);
// no checks of result necessary
break;
case Token::SHR:
__ shr(ebx);
// Check that the *unsigned* result fits in a smi.
// Neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging.
// - 0x40000000: this number would convert to negative when
// smi tagging. These two cases can only happen with shifts
// by 0 or 1 when handed a valid smi.
__ test(ebx, Immediate(0xc0000000));
__ j(not_zero, &slow, not_taken);
break;
case Token::SHL:
__ shl(ebx);
// Check that the *signed* result fits in a smi.
__ lea(ecx, Operand(ebx, 0x40000000));
__ test(ecx, Immediate(0x80000000));
__ j(not_zero, &slow, not_taken);
break;
default: UNREACHABLE();
}
// tag result and store it in TOS (eax)
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(eax, Operand(ebx, times_2, kSmiTag));
__ jmp(&exit);
// slow case
__ bind(&slow);
frame_->Push(eax); // restore stack
frame_->Push(edx);
GenericBinaryOpStub stub(op, overwrite_mode);
__ CallStub(&stub);
__ bind(&exit);
frame_->Push(eax);
case Token::SAR:
flags = SMI_CODE_INLINED;
break;
}
case Token::COMMA: {
// simply discard left value
frame_->Pop(eax);
frame_->Pop();
frame_->Push(eax);
default:
flags = SMI_CODE_IN_STUB;
break;
}
default: UNREACHABLE();
}
if (flags == SMI_CODE_INLINED) {
// Create a new deferred code for the slow-case part.
DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags);
// Fetch the operands from the stack.
frame_->Pop(ebx); // get y
__ mov(eax, frame_->Top()); // get x
// Generate the inline part of the code.
deferred->GenerateInlineCode();
// Put result back on the stack. It seems somewhat weird to let
// the deferred code jump back before the assignment to the frame
// top, but this is just to let the peephole optimizer get rid of
// more code.
__ bind(deferred->exit());
__ mov(frame_->Top(), eax);
} else {
// Call the stub and push the result to the stack.
GenericBinaryOpStub stub(op, overwrite_mode, flags);
__ CallStub(&stub);
frame_->Push(eax);
}
}
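In outline, the refactored GenericBinaryOperation emits the inlined smi fast case first and routes failures to an out-of-line deferred block that calls the stub, with both paths joining at a store to the frame top. A rough C++ analogue (helper bodies are placeholders; __builtin_add_overflow is a GCC/Clang intrinsic standing in for the overflow flag):

#include <cstdint>

static bool smi_fast_case(int32_t x, int32_t y, int32_t* out) {
  if ((x | y) & 1) return false;  // assumed: smi tag is a clear bit 0
  return !__builtin_add_overflow(x, y, out);  // ADD only, for brevity
}

static int32_t binary_op_stub(int32_t x, int32_t y) {
  return x + y;  // stand-in for the GenericBinaryOpStub call
}

void generic_binary_operation(int32_t* frame_top, int32_t y) {
  int32_t x = *frame_top;  // x is left on the frame, as above
  int32_t result;
  if (!smi_fast_case(x, y, &result)) {
    result = binary_op_stub(x, y);  // the deferred slow case
  }
  *frame_top = result;  // single join point, like deferred->exit()
}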
@@ -867,7 +852,7 @@ class DeferredInlinedSmiOperation: public DeferredCode {
virtual void Generate() {
__ push(eax);
__ push(Immediate(Smi::FromInt(value_)));
GenericBinaryOpStub igostub(op_, overwrite_mode_);
GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
}
@@ -890,7 +875,7 @@ class DeferredInlinedSmiOperationReversed: public DeferredCode {
virtual void Generate() {
__ push(Immediate(Smi::FromInt(value_)));
__ push(eax);
GenericBinaryOpStub igostub(op_, overwrite_mode_);
GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
}
@@ -915,7 +900,7 @@ class DeferredInlinedSmiAdd: public DeferredCode {
__ sub(Operand(eax), immediate);
__ push(eax);
__ push(immediate);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
}
@@ -939,7 +924,7 @@ class DeferredInlinedSmiAddReversed: public DeferredCode {
__ sub(Operand(eax), immediate);
__ push(immediate);
__ push(eax);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
}
@@ -963,7 +948,7 @@ class DeferredInlinedSmiSub: public DeferredCode {
__ add(Operand(eax), immediate);
__ push(eax);
__ push(immediate);
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_);
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
}
@@ -989,7 +974,7 @@ class DeferredInlinedSmiSubReversed: public DeferredCode {
__ add(eax, Operand(tos_reg_));
__ push(eax);
__ push(tos_reg_);
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_);
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
}
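The DeferredInlinedSmiAdd/Sub classes embody an optimistic pattern: the fast path mutates eax first, and the deferred path undoes the damage before handing the original operands to the stub. A self-contained C++ model (names hypothetical; the xor expression is the textbook signed-overflow test):

#include <cstdint>

static int32_t stub_add(int32_t x, int32_t y) { return x + y; }  // stand-in

int32_t smi_add_inlined(int32_t reg, int32_t imm) {
  // Optimistic add, like '__ add(Operand(eax), immediate)'.
  int32_t sum = static_cast<int32_t>(
      static_cast<uint32_t>(reg) + static_cast<uint32_t>(imm));
  bool overflow = ((reg ^ sum) & (imm ^ sum)) < 0;  // signed overflow?
  bool smi_operand = (reg & 1) == 0;  // tag bit 0 clear (imm is pre-tagged)
  if (!overflow && smi_operand) return sum;  // fast case succeeded
  // Deferred slow case: revert the optimistic add first, mirroring
  // '__ sub(Operand(eax), immediate)' above, then call the stub.
  int32_t original = static_cast<int32_t>(
      static_cast<uint32_t>(sum) - static_cast<uint32_t>(imm));
  return stub_add(original, imm);
}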
@@ -3969,182 +3954,172 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
}
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
__ mov(eax, Operand(esp, 1 * kPointerSize)); // Get y.
__ mov(edx, Operand(esp, 2 * kPointerSize)); // Get x.
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (eax <op> ebx) and
// leave result in register eax.
// Prepare the smi check of both operands by or'ing them together
// before checking against the smi mask.
__ mov(ecx, Operand(ebx));
__ or_(ecx, Operand(eax));
// 1. Smi case.
switch (op_) {
case Token::ADD: {
// eax: y.
// edx: x.
Label revert;
__ mov(ecx, Operand(eax));
__ or_(ecx, Operand(edx)); // ecx = x | y.
__ add(eax, Operand(edx)); // Add y optimistically.
// Go slow-path in case of overflow.
__ j(overflow, &revert, not_taken);
// Go slow-path in case of non-smi operands.
ASSERT(kSmiTag == 0); // adjust code below
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &revert, not_taken);
__ ret(2 * kPointerSize); // Remove all operands.
// Revert optimistic add.
__ bind(&revert);
__ sub(eax, Operand(edx));
case Token::ADD:
__ add(eax, Operand(ebx)); // add optimistically
__ j(overflow, slow, not_taken);
break;
}
case Token::SUB: {
// eax: y.
// edx: x.
Label revert;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax)); // ecx = x | y.
__ sub(edx, Operand(eax)); // Subtract y optimistically.
// Go slow-path in case of overflow.
__ j(overflow, &revert, not_taken);
// Go slow-path in case of non-smi operands.
ASSERT(kSmiTag == 0); // adjust code below
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &revert, not_taken);
__ mov(eax, Operand(edx));
__ ret(2 * kPointerSize); // Remove all operands.
// Revert optimistic sub.
__ bind(&revert);
__ add(edx, Operand(eax));
case Token::SUB:
__ sub(eax, Operand(ebx)); // subtract optimistically
__ j(overflow, slow, not_taken);
break;
}
case Token::MUL: {
// eax: y
// edx: x
// a) both operands smi and result fits into a smi -> return.
// b) at least one of operands non-smi -> non_smi_operands.
// c) result does not fit in a smi -> non_smi_result.
Label non_smi_operands, non_smi_result;
// Tag check.
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax)); // ecx = x | y.
ASSERT(kSmiTag == 0); // Adjust code below.
__ test(ecx, Immediate(kSmiTagMask));
// Jump if not both smi; check if float numbers.
__ j(not_zero, &non_smi_operands, not_taken);
// Get copies of operands.
__ mov(ebx, Operand(eax));
__ mov(ecx, Operand(edx));
// If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // adjust code below
// Remove tag from one of the operands (but keep sign).
__ sar(ecx, kSmiTagSize);
// Do multiplication.
__ imul(eax, Operand(ecx)); // Multiplication of Smis; result in eax.
// Go slow on overflows.
__ j(overflow, &non_smi_result, not_taken);
// ...but operands OK for float arithmetic.
// If the result is +0 we may need to check if the result should
// really be -0. Welcome to the -0 fan club.
__ NegativeZeroTest(eax, ebx, edx, ecx, &non_smi_result);
case Token::DIV:
case Token::MOD:
// Sign extend eax into edx:eax.
__ cdq();
// Check for 0 divisor.
__ test(ebx, Operand(ebx));
__ j(zero, slow, not_taken);
break;
__ ret(2 * kPointerSize);
default:
// Fall-through to smi check.
break;
}
__ bind(&non_smi_result);
// TODO(1243132): Do not check float operands here.
__ bind(&non_smi_operands);
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize));
// Perform the actual smi check.
ASSERT(kSmiTag == 0); // adjust zero check if not the case
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, slow, not_taken);
switch (op_) {
case Token::ADD:
case Token::SUB:
// Do nothing here.
break;
}
case Token::DIV: {
// eax: y
// edx: x
Label non_smi_operands, non_smi_result, division_by_zero;
__ mov(ebx, Operand(eax)); // Get y
__ mov(eax, Operand(edx)); // Get x
__ cdq(); // Sign extend eax into edx:eax.
// Tag check.
__ mov(ecx, Operand(ebx));
__ or_(ecx, Operand(eax)); // ecx = x | y.
ASSERT(kSmiTag == 0); // Adjust code below.
__ test(ecx, Immediate(kSmiTagMask));
// Jump if not both smi; check if float numbers.
__ j(not_zero, &non_smi_operands, not_taken);
__ test(ebx, Operand(ebx)); // Check for 0 divisor.
__ j(zero, &division_by_zero, not_taken);
case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // adjust code below if not the case
// Remove tag from one of the operands (but keep sign).
__ sar(eax, kSmiTagSize);
// Do multiplication.
__ imul(eax, Operand(ebx)); // multiplication of smis; result in eax
// Go slow on overflows.
__ j(overflow, slow, not_taken);
// Check for negative zero result.
__ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
break;
case Token::DIV:
// Divide edx:eax by ebx.
__ idiv(ebx);
// Check for the corner case of dividing the most negative smi by -1.
// (We cannot use the overflow flag, since it is not set by idiv.)
// Check for the corner case of dividing the most negative smi
// by -1. We cannot use the overflow flag, since it is not set
// by the idiv instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmp(eax, 0x40000000);
__ j(equal, &non_smi_result);
// If the result is +0 we may need to check if the result should
// really be -0. Welcome to the -0 fan club.
__ NegativeZeroTest(eax, ecx, &non_smi_result); // Use ecx = x | y.
__ j(equal, slow);
// Check for negative zero result.
__ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
// Check that the remainder is zero.
__ test(edx, Operand(edx));
// Use floats if there's a remainder.
__ j(not_zero, &non_smi_result, not_taken);
__ shl(eax, kSmiTagSize);
__ ret(2 * kPointerSize); // Remove all operands.
__ bind(&division_by_zero);
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ jmp(&call_runtime); // Division by zero must go through runtime.
__ bind(&non_smi_result);
// TODO(1243132): Do not check float operands here.
__ bind(&non_smi_operands);
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ j(not_zero, slow);
// Tag the result and store it in register eax.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(eax, Operand(eax, times_2, kSmiTag));
break;
}
case Token::MOD: {
Label slow;
__ mov(ebx, Operand(eax)); // get y
__ mov(eax, Operand(edx)); // get x
__ cdq(); // sign extend eax into edx:eax
// tag check
__ mov(ecx, Operand(ebx));
__ or_(ecx, Operand(eax)); // ecx = x | y;
ASSERT(kSmiTag == 0); // adjust code below
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow, not_taken);
__ test(ebx, Operand(ebx)); // test for y == 0
__ j(zero, &slow);
// Fast case: Do integer division and use remainder.
case Token::MOD:
// Divide edx:eax by ebx.
__ idiv(ebx);
__ NegativeZeroTest(edx, ecx, &slow); // use ecx = x | y
// Check for negative zero result.
__ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y
// Move remainder to register eax.
__ mov(eax, Operand(edx));
__ ret(2 * kPointerSize);
// Slow case: Call runtime operator implementation.
__ bind(&slow);
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize));
// Fall through to |call_runtime|.
break;
}
case Token::BIT_OR:
__ or_(eax, Operand(ebx));
break;
case Token::BIT_AND:
__ and_(eax, Operand(ebx));
break;
case Token::BIT_XOR:
case Token::SAR:
__ xor_(eax, Operand(ebx));
break;
case Token::SHL:
case Token::SHR: {
// Smi-case for bitops should already have been inlined.
case Token::SHR:
case Token::SAR:
// Move the second operand into register ecx.
__ mov(ecx, Operand(ebx));
// Remove tags from operands (but keep sign).
__ sar(eax, kSmiTagSize);
__ sar(ecx, kSmiTagSize);
// Perform the operation.
switch (op_) {
case Token::SAR:
__ sar(eax);
// No checks of result necessary
break;
case Token::SHR:
__ shr(eax);
// Check that the *unsigned* result fits in a smi.
// Neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging.
// - 0x40000000: this number would convert to negative when
// smi tagging. These two cases can only happen with shifts
// by 0 or 1 when handed a valid smi.
__ test(eax, Immediate(0xc0000000));
__ j(not_zero, slow, not_taken);
break;
case Token::SHL:
__ shl(eax);
// Check that the *signed* result fits in a smi.
__ lea(ecx, Operand(eax, 0x40000000));
__ test(ecx, Immediate(0x80000000));
__ j(not_zero, slow, not_taken);
break;
default:
UNREACHABLE();
}
// Tag the result and store it in register eax.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(eax, Operand(eax, times_2, kSmiTag));
break;
}
default: {
default:
UNREACHABLE();
}
break;
}
}
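The two shift checks in GenerateSmiCode are worth spelling out. With a 31-bit payload and the tag in bit 0 (as the asserts above require), a result survives tagging only if shifting it left by one round-trips, and the stub tests this differently for SHR and SHL. A sketch as pure functions (assumed layout, hypothetical names):

#include <cstdint>

bool shr_result_fits_smi(uint32_t r) {
  // Neither high-order bit may be set: 0x80000000 would be lost when
  // tagging, 0x40000000 would turn the tagged value negative.
  return (r & 0xc0000000u) == 0;
}

bool shl_result_fits_smi(int32_t r) {
  // The signed result must lie in [-0x40000000, 0x3fffffff]. Adding
  // 0x40000000 and testing bit 31 is the branch-free form behind the
  // 'lea ecx, [eax + 0x40000000]; test ecx, 0x80000000' pair above.
  return ((static_cast<uint32_t>(r) + 0x40000000u) & 0x80000000u) == 0;
}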
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
if (flags_ == SMI_CODE_IN_STUB) {
// The fast case smi code wasn't inlined in the stub caller
// code. Generate it here to speed up common operations.
Label slow;
__ mov(ebx, Operand(esp, 1 * kPointerSize)); // get y
__ mov(eax, Operand(esp, 2 * kPointerSize)); // get x
GenerateSmiCode(masm, &slow);
__ ret(2 * kPointerSize); // remove both operands
// Too bad. The fast case smi code didn't succeed.
__ bind(&slow);
}
// Setup registers.
__ mov(eax, Operand(esp, 1 * kPointerSize)); // get y
__ mov(edx, Operand(esp, 2 * kPointerSize)); // get x
// 2. Floating point case.
// Floating point case.
switch (op_) {
case Token::ADD:
case Token::SUB:
@@ -4286,7 +4261,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default: UNREACHABLE(); break;
}
// 3. If all else fails, use the runtime system to get the correct result.
// If all else fails, use the runtime system to get the correct
// result.
__ bind(&call_runtime);
switch (op_) {
case Token::ADD:
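One subtlety the smi path has to respect (the "-0 fan club" comment above): in JavaScript, 0 * -4 and 0 % -4 produce -0, which has no smi representation, so an integer result of +0 with a negative operand must bail out to the slow path. That is what NegativeZeroTest checks, using ecx = x | y so one sign test covers both operands; a sketch of the predicate (hypothetical helper name):

#include <cstdint>

// Returns true when the integer result 'r' is a valid smi payload for
// a multiply/divide/modulus, given x_or_y == x | y. If r is zero but
// either operand was negative (sign bit of x | y set), the correct JS
// result is -0.0, which only a heap number can represent.
bool result_avoids_negative_zero(int32_t r, int32_t x_or_y) {
  return r != 0 || x_or_y >= 0;
}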
@@ -288,10 +288,11 @@ class CodeGenerator: public Visitor {
void GenericBinaryOperation(Token::Value op,
const OverwriteMode overwrite_mode = NO_OVERWRITE);
void Comparison(Condition cc, bool strict = false);
// Inline small integer literals. To prevent long attacker-controlled byte
// sequences, we only inline small Smi:s.
// sequences, we only inline small Smis.
static const int kMaxSmiInlinedBits = 16;
bool IsInlineSmi(Literal* literal);
void SmiComparison(Condition cc, Handle<Object> value, bool strict = false);
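Given kMaxSmiInlinedBits = 16, the literal filter plausibly reduces to a signed-range check; a sketch (hypothetical body — the real IsInlineSmi also verifies the literal actually holds a smi):

static const int kMaxSmiInlinedBits = 16;

// Only inline literals whose value fits in kMaxSmiInlinedBits signed
// bits, so attackers cannot smuggle long chosen byte sequences into
// generated code through large numeric literals.
static bool is_small_smi(int value) {
  return value >= -(1 << (kMaxSmiInlinedBits - 1)) &&
         value < (1 << (kMaxSmiInlinedBits - 1));
}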