Commit 61f40129 authored by yangguo@chromium.org

Use C++ style type casts.

R=mstarzinger@chromium.org
BUG=

Review URL: https://chromiumcodereview.appspot.com/11644097

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13326 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 0e46919c
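The change replaces C-style casts with the equivalent C++ named casts in the ARM/MIPS ports and a few shared files: static_cast where a value is converted, reinterpret_cast where the bits of a pointer are reused as an integer (the simulators and the MIPS assembler store code addresses in uint32_t because those ports are 32-bit). The following is a minimal standalone sketch of the two idioms, not taken from the patch; it uses uintptr_t so it compiles on any target.

#include <cstdint>
#include <cstdio>

int main() {
  double value = 3.9;
  void* buffer = &value;

  // C-style casts (the form being removed): the same syntax silently covers
  // both a value conversion and a pointer reinterpretation.
  uint32_t truncated_c = (uint32_t) value;
  uintptr_t address_c = (uintptr_t) buffer;

  // C++-style casts (the form being introduced): static_cast for value
  // conversions, reinterpret_cast where a pointer is reused as an integer.
  // Writing static_cast on the second line would not compile, which is the
  // safety gain of the named casts.
  uint32_t truncated = static_cast<uint32_t>(value);
  uintptr_t address = reinterpret_cast<uintptr_t>(buffer);

  std::printf("truncated: %u\n", static_cast<unsigned>(truncated));
  return (truncated == truncated_c && address == address_c) ? 0 : 1;
}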
@@ -1776,7 +1776,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
 if (::v8::internal::FLAG_trace_sim) {
 PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
 }
-set_register(r0, (int32_t) *result);
+set_register(r0, reinterpret_cast<int32_t>(*result));
 } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
 SimulatorRuntimeDirectGetterCall target =
 reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
@@ -1793,7 +1793,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
 if (::v8::internal::FLAG_trace_sim) {
 PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
 }
-set_register(r0, (int32_t) *result);
+set_register(r0, reinterpret_cast<int32_t>(*result));
 } else {
 // builtin call.
 ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
@@ -3083,15 +3083,15 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
 if (src_precision == kDoublePrecision) {
 if (unsigned_integer) {
-set_d_register_from_double(dst,
-static_cast<double>((uint32_t)val));
+set_d_register_from_double(
+dst, static_cast<double>(static_cast<uint32_t>(val)));
 } else {
 set_d_register_from_double(dst, static_cast<double>(val));
 }
 } else {
 if (unsigned_integer) {
-set_s_register_from_float(dst,
-static_cast<float>((uint32_t)val));
+set_s_register_from_float(
+dst, static_cast<float>(static_cast<uint32_t>(val)));
 } else {
 set_s_register_from_float(dst, static_cast<float>(val));
 }
......
@@ -212,7 +212,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
 }
 // Rounding up may cause overflow.
-if ((number & ((int64_t)1 << 53)) != 0) {
+if ((number & (static_cast<int64_t>(1) << 53)) != 0) {
 exponent++;
 number >>= 1;
 }
......
@@ -261,8 +261,9 @@ MaybeObject* Heap::NumberFromInt32(
 MaybeObject* Heap::NumberFromUint32(
 uint32_t value, PretenureFlag pretenure) {
-if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
-return Smi::FromInt((int32_t)value);
+if (static_cast<int32_t>(value) >= 0 &&
+Smi::IsValid(static_cast<int32_t>(value))) {
+return Smi::FromInt(static_cast<int32_t>(value));
 }
 // Bypass NumberFromDouble to avoid various redundant checks.
 return AllocateHeapNumber(FastUI2D(value), pretenure);
......
@@ -1689,7 +1689,7 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
 for (;
 cell_index < last_cell_index;
 cell_index++, cell_base += 32 * kPointerSize) {
-ASSERT((unsigned)cell_index ==
+ASSERT(static_cast<unsigned>(cell_index) ==
 Bitmap::IndexToCell(
 Bitmap::CellAlignIndex(
 p->AddressToMarkbitIndex(cell_base))));
@@ -2589,7 +2589,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
 for (;
 cell_index < last_cell_index;
 cell_index++, cell_base += 32 * kPointerSize) {
-ASSERT((unsigned)cell_index ==
+ASSERT(static_cast<unsigned>(cell_index) ==
 Bitmap::IndexToCell(
 Bitmap::CellAlignIndex(
 p->AddressToMarkbitIndex(cell_base))));
@@ -2762,7 +2762,7 @@ static void SweepPrecisely(PagedSpace* space,
 for (;
 cell_index < last_cell_index;
 cell_index++, object_address += 32 * kPointerSize) {
-ASSERT((unsigned)cell_index ==
+ASSERT(static_cast<unsigned>(cell_index) ==
 Bitmap::IndexToCell(
 Bitmap::CellAlignIndex(
 p->AddressToMarkbitIndex(object_address))));
@@ -3442,7 +3442,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
 for ( ;
 cell_index < last_cell_index;
 cell_index++, block_address += 32 * kPointerSize) {
-ASSERT((unsigned)cell_index ==
+ASSERT(static_cast<unsigned>(cell_index) ==
 Bitmap::IndexToCell(
 Bitmap::CellAlignIndex(
 p->AddressToMarkbitIndex(block_address))));
......
@@ -583,7 +583,7 @@ bool Assembler::IsNop(Instr instr, unsigned int type) {
 int32_t Assembler::GetBranchOffset(Instr instr) {
 ASSERT(IsBranch(instr));
-return ((int16_t)(instr & kImm16Mask)) << 2;
+return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
 }
@@ -716,7 +716,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
 ASSERT(IsOri(instr_ori));
-uint32_t imm = (uint32_t)buffer_ + target_pos;
+uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
 ASSERT((imm & 3) == 0);
 instr_lui &= ~kImm16Mask;
@@ -727,7 +727,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
 instr_at_put(pos + 1 * Assembler::kInstrSize,
 instr_ori | (imm & kImm16Mask));
 } else {
-uint32_t imm28 = (uint32_t)buffer_ + target_pos;
+uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
 imm28 &= kImm28Mask;
 ASSERT((imm28 & 3) == 0);
@@ -979,7 +979,7 @@ uint32_t Assembler::jump_address(Label* L) {
 }
 }
-uint32_t imm = (uint32_t)buffer_ + target_pos;
+uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
 ASSERT((imm & 3) == 0);
 return imm;
@@ -1114,7 +1114,8 @@ void Assembler::j(int32_t target) {
 #if DEBUG
 // Get pc of delay slot.
 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
-bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+(kImm26Bits + kImmFieldShift)) == 0;
 ASSERT(in_range && ((target & 3) == 0));
 #endif
 GenInstrJump(J, target >> 2);
@@ -1135,7 +1136,8 @@ void Assembler::jal(int32_t target) {
 #ifdef DEBUG
 // Get pc of delay slot.
 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
-bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+(kImm26Bits + kImmFieldShift)) == 0;
 ASSERT(in_range && ((target & 3) == 0));
 #endif
 positions_recorder()->WriteRecordedPositions();
@@ -1154,8 +1156,8 @@ void Assembler::jalr(Register rs, Register rd) {
 void Assembler::j_or_jr(int32_t target, Register rs) {
 // Get pc of delay slot.
 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
-bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+(kImm26Bits + kImmFieldShift)) == 0;
 if (in_range) {
 j(target);
 } else {
@@ -1167,8 +1169,8 @@ void Assembler::jal_or_jalr(int32_t target, Register rs) {
 void Assembler::jal_or_jalr(int32_t target, Register rs) {
 // Get pc of delay slot.
 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
-bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+(kImm26Bits + kImmFieldShift)) == 0;
 if (in_range) {
 jal(target);
 } else {
@@ -1927,7 +1929,7 @@ int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
 return 2; // Number of instructions patched.
 } else {
 uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
-if ((int32_t)imm28 == kEndOfJumpChain) {
+if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
 return 0; // Number of instructions patched.
 }
 imm28 += pc_delta;
@@ -2177,9 +2179,10 @@ void Assembler::set_target_address_at(Address pc, Address target) {
 Instr instr3 = instr_at(pc + 2 * kInstrSize);
 uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
-bool in_range =
-((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
-uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift;
+bool in_range = ((ipc ^ static_cast<uint32_t>(itarget)) >>
+(kImm26Bits + kImmFieldShift)) == 0;
+uint32_t target_field =
+static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
 bool patched_jump = false;
 #ifndef ALLOW_JAL_IN_BOUNDARY_REGION
......
@@ -99,7 +99,7 @@ const int kInvalidFPURegister = -1;
 // FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
 const int kFCSRRegister = 31;
 const int kInvalidFPUControlRegister = -1;
-const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+const uint32_t kFPUInvalidResult = (static_cast<uint32_t>(1) << 31) - 1;
 // FCSR constants.
 const uint32_t kFCSRInexactFlagBit = 2;
......
@@ -1703,7 +1703,7 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
 // was inlined.
 Instr instr = Assembler::instr_at(andi_instruction_address);
 return Assembler::IsAndImmediate(instr) &&
-Assembler::GetRt(instr) == (uint32_t)zero_reg.code();
+Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
 }
@@ -1715,7 +1715,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
 // was inlined.
 Instr instr = Assembler::instr_at(andi_instruction_address);
 if (!(Assembler::IsAndImmediate(instr) &&
-Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
+Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
 return;
 }
......
@@ -1545,7 +1545,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
 FUNCTION_ADDR(target), arg1);
 }
 v8::Handle<v8::Value> result = target(arg1);
-*(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+*(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
 set_register(v0, arg0);
 } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
 // See DirectCEntryStub::GenerateCall for explanation of register usage.
@@ -1556,7 +1556,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
 FUNCTION_ADDR(target), arg1, arg2);
 }
 v8::Handle<v8::Value> result = target(arg1, arg2);
-*(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+*(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
 set_register(v0, arg0);
 } else {
 SimulatorRuntimeCall target =
@@ -2193,8 +2193,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
 case CVT_D_L: // Mips32r2 instruction.
 // Watch the signs here, we want 2 32-bit vals
 // to make a sign-64.
-i64 = (uint32_t) get_fpu_register(fs_reg);
-i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32);
+i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
+i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
 set_fpu_register_double(fd_reg, static_cast<double>(i64));
 break;
 case CVT_S_L:
......
@@ -1734,7 +1734,7 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
 if (!js_value->value()->IsString()) return false;
 String* str = String::cast(js_value->value());
-if (index >= (uint32_t)str->length()) return false;
+if (index >= static_cast<uint32_t>(str->length())) return false;
 return true;
 }
......
@@ -6878,7 +6878,7 @@ String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
 if ((type & kStringRepresentationMask) != kConsStringTag) {
 // Pop stack so next iteration is in correct place.
 Pop();
-unsigned length = (unsigned) string->length();
+unsigned length = static_cast<unsigned>(string->length());
 // Could be a flattened ConsString.
 if (length == 0) continue;
 *length_out = length;
@@ -6896,7 +6896,7 @@ String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
 type = string->map()->instance_type();
 if ((type & kStringRepresentationMask) != kConsStringTag) {
 AdjustMaximumDepth();
-unsigned length = (unsigned) string->length();
+unsigned length = static_cast<unsigned>(string->length());
 ASSERT(length != 0);
 *length_out = length;
 *type_out = type;
......
@@ -304,7 +304,7 @@ inline uint32_t ComputeLongHash(uint64_t key) {
 hash = hash ^ (hash >> 11);
 hash = hash + (hash << 6);
 hash = hash ^ (hash >> 22);
-return (uint32_t) hash;
+return static_cast<uint32_t>(hash);
 }
......
@@ -164,6 +164,13 @@ TEST(HeapObjects) {
 CHECK_EQ(static_cast<double>(static_cast<uint32_t>(Smi::kMaxValue) + 1),
 value->Number());
+maybe_value = HEAP->NumberFromUint32(static_cast<uint32_t>(1) << 31);
+value = maybe_value->ToObjectChecked();
+CHECK(value->IsHeapNumber());
+CHECK(value->IsNumber());
+CHECK_EQ(static_cast<double>(static_cast<uint32_t>(1) << 31),
+value->Number());
 // nan oddball checks
 CHECK(HEAP->nan_value()->IsNumber());
 CHECK(isnan(HEAP->nan_value()->Number()));
......
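One caveat when converting casts mechanically: a C-style cast and a named cast both bind tighter than the shift and arithmetic operators, so any parentheses that grouped the casted sub-expression (as around kFPUInvalidResult and the in_range checks in the MIPS assembler above) have to be kept. A small self-contained illustration, not taken from the patch:

#include <cstdint>
#include <cassert>

int main() {
  uint64_t x = 0xffff0000u;

  // Both cast forms bind tighter than <<, so these two lines agree.
  uint32_t a = (uint32_t)x >> 16;               // 0x0000ffff
  uint32_t b = static_cast<uint32_t>(x) >> 16;  // 0x0000ffff
  assert(a == b);

  // Parentheses around the whole shifted expression are load-bearing:
  // '-' has higher precedence than '<<', so dropping them shifts by 30.
  uint32_t all_but_top = (static_cast<uint32_t>(1) << 31) - 1;  // 0x7fffffff
  uint32_t shifted_by_30 = static_cast<uint32_t>(1) << 31 - 1;  // 0x40000000
  assert(all_but_top != shifted_by_30);
  return 0;
}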