Commit 61f40129 authored by yangguo@chromium.org

Use C++ style type casts.

R=mstarzinger@chromium.org
BUG=

Review URL: https://chromiumcodereview.appspot.com/11644097

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13326 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 0e46919c
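For context, a minimal standalone sketch of the convention this commit applies (illustrative only; the variable names below are made up and do not come from the V8 sources): C-style casts are replaced with the C++ cast that names the intent, static_cast for value conversions and reinterpret_cast for bit-level reinterpretation, which makes each conversion explicit and easy to grep for.

```cpp
#include <cstdint>
#include <iostream>

int main() {
  int64_t number = (static_cast<int64_t>(1) << 53) + 1;

  // C style: one syntax covers every kind of conversion, so the intent is implicit.
  uint32_t low_c = (uint32_t)number;   // value truncation
  int64_t bit_c = (int64_t)1 << 53;    // widen before shifting

  // C++ style: the cast kind is spelled out at the call site.
  uint32_t low_cpp = static_cast<uint32_t>(number);
  int64_t bit_cpp = static_cast<int64_t>(1) << 53;

  // reinterpret_cast is reserved for bit-level reinterpretation, e.g. pointers.
  void* buffer = &number;
  uintptr_t address = reinterpret_cast<uintptr_t>(buffer);

  std::cout << low_c << " " << bit_c << " "
            << low_cpp << " " << bit_cpp << " " << address << "\n";
  return 0;
}
```

The patch itself mechanically rewrites existing casts in this style across the ARM and MIPS simulators, the MIPS assembler, the heap, the mark-compact collector, objects, and utils, and adds a small heap test.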
@@ -1776,7 +1776,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
       if (::v8::internal::FLAG_trace_sim) {
         PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
       }
-      set_register(r0, (int32_t) *result);
+      set_register(r0, reinterpret_cast<int32_t>(*result));
     } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
       SimulatorRuntimeDirectGetterCall target =
           reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
@@ -1793,7 +1793,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
       if (::v8::internal::FLAG_trace_sim) {
         PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
       }
-      set_register(r0, (int32_t) *result);
+      set_register(r0, reinterpret_cast<int32_t>(*result));
     } else {
       // builtin call.
       ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
@@ -3083,15 +3083,15 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
     if (src_precision == kDoublePrecision) {
       if (unsigned_integer) {
-        set_d_register_from_double(dst,
-                                   static_cast<double>((uint32_t)val));
+        set_d_register_from_double(
+            dst, static_cast<double>(static_cast<uint32_t>(val)));
       } else {
         set_d_register_from_double(dst, static_cast<double>(val));
       }
     } else {
       if (unsigned_integer) {
-        set_s_register_from_float(dst,
-                                  static_cast<float>((uint32_t)val));
+        set_s_register_from_float(
+            dst, static_cast<float>(static_cast<uint32_t>(val)));
       } else {
         set_s_register_from_float(dst, static_cast<float>(val));
       }
...
@@ -212,7 +212,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
     }
     // Rounding up may cause overflow.
-    if ((number & ((int64_t)1 << 53)) != 0) {
+    if ((number & (static_cast<int64_t>(1) << 53)) != 0) {
       exponent++;
       number >>= 1;
     }
...
@@ -261,8 +261,9 @@ MaybeObject* Heap::NumberFromInt32(
 MaybeObject* Heap::NumberFromUint32(
     uint32_t value, PretenureFlag pretenure) {
-  if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
-    return Smi::FromInt((int32_t)value);
+  if (static_cast<int32_t>(value) >= 0 &&
+      Smi::IsValid(static_cast<int32_t>(value))) {
+    return Smi::FromInt(static_cast<int32_t>(value));
   }
   // Bypass NumberFromDouble to avoid various redundant checks.
   return AllocateHeapNumber(FastUI2D(value), pretenure);
...
@@ -1689,7 +1689,7 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
   for (;
        cell_index < last_cell_index;
        cell_index++, cell_base += 32 * kPointerSize) {
-    ASSERT((unsigned)cell_index ==
+    ASSERT(static_cast<unsigned>(cell_index) ==
            Bitmap::IndexToCell(
                Bitmap::CellAlignIndex(
                    p->AddressToMarkbitIndex(cell_base))));
@@ -2589,7 +2589,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
   for (;
        cell_index < last_cell_index;
        cell_index++, cell_base += 32 * kPointerSize) {
-    ASSERT((unsigned)cell_index ==
+    ASSERT(static_cast<unsigned>(cell_index) ==
            Bitmap::IndexToCell(
                Bitmap::CellAlignIndex(
                    p->AddressToMarkbitIndex(cell_base))));
@@ -2762,7 +2762,7 @@ static void SweepPrecisely(PagedSpace* space,
   for (;
        cell_index < last_cell_index;
        cell_index++, object_address += 32 * kPointerSize) {
-    ASSERT((unsigned)cell_index ==
+    ASSERT(static_cast<unsigned>(cell_index) ==
            Bitmap::IndexToCell(
                Bitmap::CellAlignIndex(
                    p->AddressToMarkbitIndex(object_address))));
@@ -3442,7 +3442,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   for ( ;
        cell_index < last_cell_index;
        cell_index++, block_address += 32 * kPointerSize) {
-    ASSERT((unsigned)cell_index ==
+    ASSERT(static_cast<unsigned>(cell_index) ==
            Bitmap::IndexToCell(
                Bitmap::CellAlignIndex(
                    p->AddressToMarkbitIndex(block_address))));
...
@@ -583,7 +583,7 @@ bool Assembler::IsNop(Instr instr, unsigned int type) {
 int32_t Assembler::GetBranchOffset(Instr instr) {
   ASSERT(IsBranch(instr));
-  return ((int16_t)(instr & kImm16Mask)) << 2;
+  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
 }
@@ -716,7 +716,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
     Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
     Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
     ASSERT(IsOri(instr_ori));
-    uint32_t imm = (uint32_t)buffer_ + target_pos;
+    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
     ASSERT((imm & 3) == 0);
     instr_lui &= ~kImm16Mask;
@@ -727,7 +727,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
     instr_at_put(pos + 1 * Assembler::kInstrSize,
                  instr_ori | (imm & kImm16Mask));
   } else {
-    uint32_t imm28 = (uint32_t)buffer_ + target_pos;
+    uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
     imm28 &= kImm28Mask;
     ASSERT((imm28 & 3) == 0);
@@ -979,7 +979,7 @@ uint32_t Assembler::jump_address(Label* L) {
     }
   }
-  uint32_t imm = (uint32_t)buffer_ + target_pos;
+  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
   ASSERT((imm & 3) == 0);
   return imm;
@@ -1114,7 +1114,8 @@ void Assembler::j(int32_t target) {
 #if DEBUG
   // Get pc of delay slot.
   uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
-  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+                   (kImm26Bits + kImmFieldShift)) == 0;
   ASSERT(in_range && ((target & 3) == 0));
 #endif
   GenInstrJump(J, target >> 2);
@@ -1135,7 +1136,8 @@ void Assembler::jal(int32_t target) {
 #ifdef DEBUG
   // Get pc of delay slot.
   uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
-  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+                   (kImm26Bits + kImmFieldShift)) == 0;
   ASSERT(in_range && ((target & 3) == 0));
 #endif
   positions_recorder()->WriteRecordedPositions();
@@ -1154,8 +1156,8 @@ void Assembler::jalr(Register rs, Register rd) {
 void Assembler::j_or_jr(int32_t target, Register rs) {
   // Get pc of delay slot.
   uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
-  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+                   (kImm26Bits + kImmFieldShift)) == 0;
   if (in_range) {
       j(target);
   } else {
@@ -1167,8 +1169,8 @@ void Assembler::j_or_jr(int32_t target, Register rs) {
 void Assembler::jal_or_jalr(int32_t target, Register rs) {
   // Get pc of delay slot.
   uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
-  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+  bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+                   (kImm26Bits+kImmFieldShift)) == 0;
   if (in_range) {
       jal(target);
   } else {
@@ -1927,7 +1929,7 @@ int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
     return 2;  // Number of instructions patched.
   } else {
     uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
-    if ((int32_t)imm28 == kEndOfJumpChain) {
+    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
       return 0;  // Number of instructions patched.
     }
     imm28 += pc_delta;
@@ -2177,9 +2179,10 @@ void Assembler::set_target_address_at(Address pc, Address target) {
   Instr instr3 = instr_at(pc + 2 * kInstrSize);
   uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
-  bool in_range =
-      ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
-  uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift;
+  bool in_range = (ipc ^ static_cast<uint32_t>(itarget) >>
+                   (kImm26Bits + kImmFieldShift)) == 0;
+  uint32_t target_field =
+      static_cast<uint32_t>(itarget & kJumpAddrMask) >>kImmFieldShift;
   bool patched_jump = false;
 #ifndef ALLOW_JAL_IN_BOUNDARY_REGION
...
@@ -99,7 +99,7 @@ const int kInvalidFPURegister = -1;
 // FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
 const int kFCSRRegister = 31;
 const int kInvalidFPUControlRegister = -1;
-const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1) << 31 - 1;
 // FCSR constants.
 const uint32_t kFCSRInexactFlagBit = 2;
...
@@ -1703,7 +1703,7 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
   // was inlined.
   Instr instr = Assembler::instr_at(andi_instruction_address);
   return Assembler::IsAndImmediate(instr) &&
-      Assembler::GetRt(instr) == (uint32_t)zero_reg.code();
+      Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
 }
@@ -1715,7 +1715,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // was inlined.
   Instr instr = Assembler::instr_at(andi_instruction_address);
   if (!(Assembler::IsAndImmediate(instr) &&
-        Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
+        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
     return;
   }
...
@@ -1545,7 +1545,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
             FUNCTION_ADDR(target), arg1);
       }
       v8::Handle<v8::Value> result = target(arg1);
-      *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+      *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
       set_register(v0, arg0);
     } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
       // See DirectCEntryStub::GenerateCall for explanation of register usage.
@@ -1556,7 +1556,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
             FUNCTION_ADDR(target), arg1, arg2);
       }
       v8::Handle<v8::Value> result = target(arg1, arg2);
-      *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+      *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
       set_register(v0, arg0);
     } else {
       SimulatorRuntimeCall target =
@@ -2193,8 +2193,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
         case CVT_D_L:  // Mips32r2 instruction.
           // Watch the signs here, we want 2 32-bit vals
           // to make a sign-64.
-          i64 = (uint32_t) get_fpu_register(fs_reg);
-          i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32);
+          i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
+          i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
           set_fpu_register_double(fd_reg, static_cast<double>(i64));
           break;
         case CVT_S_L:
...
@@ -1734,7 +1734,7 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
   if (!js_value->value()->IsString()) return false;
   String* str = String::cast(js_value->value());
-  if (index >= (uint32_t)str->length()) return false;
+  if (index >= static_cast<uint32_t>(str->length())) return false;
   return true;
 }
...
@@ -6878,7 +6878,7 @@ String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
     if ((type & kStringRepresentationMask) != kConsStringTag) {
       // Pop stack so next iteration is in correct place.
       Pop();
-      unsigned length = (unsigned) string->length();
+      unsigned length = static_cast<unsigned>(string->length());
       // Could be a flattened ConsString.
       if (length == 0) continue;
       *length_out = length;
@@ -6896,7 +6896,7 @@ String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
       type = string->map()->instance_type();
       if ((type & kStringRepresentationMask) != kConsStringTag) {
         AdjustMaximumDepth();
-        unsigned length = (unsigned) string->length();
+        unsigned length = static_cast<unsigned>(string->length());
         ASSERT(length != 0);
         *length_out = length;
         *type_out = type;
...
@@ -304,7 +304,7 @@ inline uint32_t ComputeLongHash(uint64_t key) {
   hash = hash ^ (hash >> 11);
   hash = hash + (hash << 6);
   hash = hash ^ (hash >> 22);
-  return (uint32_t) hash;
+  return static_cast<uint32_t>(hash);
 }
...
@@ -164,6 +164,13 @@ TEST(HeapObjects) {
   CHECK_EQ(static_cast<double>(static_cast<uint32_t>(Smi::kMaxValue) + 1),
            value->Number());
+  maybe_value = HEAP->NumberFromUint32(static_cast<uint32_t>(1) << 31);
+  value = maybe_value->ToObjectChecked();
+  CHECK(value->IsHeapNumber());
+  CHECK(value->IsNumber());
+  CHECK_EQ(static_cast<double>(static_cast<uint32_t>(1) << 31),
+           value->Number());
+
   // nan oddball checks
   CHECK(HEAP->nan_value()->IsNumber());
   CHECK(isnan(HEAP->nan_value()->Number()));
...