Commit b4b9fc29 authored by bmeurer, committed by Commit bot

[compiler] Fix a bunch of wrong word size assumptions.

Operations on word-size data must be word-sized, not word32.
Currently this only generates worse code, but in the future it
might even generate wrong code, so we'd better get this right
from the beginning.

R=yangguo@chromium.org

Review URL: https://codereview.chromium.org/1748953004

Cr-Commit-Position: refs/heads/master@{#34378}
parent 74d6c64c
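
To make the motivation concrete before the diff: offsets and shift amounts that feed pointer arithmetic should be computed at word (pointer) width, not int32. A minimal standalone C++ sketch of the failure mode, using plain integer types rather than the V8 Node/assembler API (the header-size and tag constants below are hypothetical stand-ins, not values from this patch):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  // int64_t stands in for the pointer-sized machine word of an assumed
  // 64-bit target, where log2(pointer size) is 3.
  const int64_t kPointerSizeLog2 = 3;
  // Hypothetical stand-ins for values like FixedArray::kHeaderSize and
  // kHeapObjectTag; the exact numbers do not matter for the point made here.
  const int64_t kHeaderSize = 16;
  const int64_t kHeapObjectTag = 1;

  int64_t index = 1000000000;  // a large element index
  // Keeping the whole computation word-sized, the byte offset stays correct
  // even once it no longer fits in 32 bits.
  int64_t offset = (index << kPointerSizeLog2) + kHeaderSize - kHeapObjectTag;
  // The same computation squeezed through int32 wraps around.
  int32_t offset32 = static_cast<int32_t>(index << kPointerSizeLog2) + 16 - 1;
  std::printf("word-sized offset: %" PRId64 ", int32 offset: %" PRId32 "\n",
              offset, offset32);
  return 0;
}

This mirrors the "worse code now, possibly wrong code later" concern: as long as all offsets fit in 32 bits only extra widening is generated, but the int32 variant silently breaks once they do not.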
@@ -119,7 +119,7 @@ Node* CodeStubAssembler::LoadStackPointer() {
 }
 
 Node* CodeStubAssembler::SmiShiftBitsConstant() {
-  return Int32Constant(kSmiShiftSize + kSmiTagSize);
+  return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
 }
@@ -147,13 +147,17 @@ Node* CodeStubAssembler::ChangeInt32ToInt64(Node* value) {
   return raw_assembler_->ChangeInt32ToInt64(value);
 }
 
+Node* CodeStubAssembler::ChangeUint32ToUint64(Node* value) {
+  return raw_assembler_->ChangeUint32ToUint64(value);
+}
+
 Node* CodeStubAssembler::WordShl(Node* value, int shift) {
-  return raw_assembler_->WordShl(value, Int32Constant(shift));
+  return raw_assembler_->WordShl(value, IntPtrConstant(shift));
 }
 
 Node* CodeStubAssembler::WordIsSmi(Node* a) {
-  return WordEqual(raw_assembler_->WordAnd(a, Int32Constant(kSmiTagMask)),
-                   Int32Constant(0));
+  return WordEqual(raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask)),
+                   IntPtrConstant(0));
 }
 
 Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset) {
@@ -169,22 +173,22 @@ Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
 Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
                                                        Node* smi_index,
                                                        int additional_offset) {
-  Node* header_size = raw_assembler_->Int32Constant(
-      additional_offset + FixedArray::kHeaderSize - kHeapObjectTag);
+  int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+  Node* header_size = IntPtrConstant(additional_offset +
+                                     FixedArray::kHeaderSize - kHeapObjectTag);
   Node* scaled_index =
-      (kSmiShiftSize == 0)
-          ? raw_assembler_->Word32Shl(
-                smi_index, Int32Constant(kPointerSizeLog2 - kSmiTagSize))
-          : raw_assembler_->Word32Shl(SmiUntag(smi_index),
-                                      Int32Constant(kPointerSizeLog2));
-  Node* offset = raw_assembler_->Int32Add(scaled_index, header_size);
-  return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
+      (kSmiShiftBits > kPointerSizeLog2)
+          ? WordSar(smi_index, IntPtrConstant(kSmiShiftBits - kPointerSizeLog2))
+          : WordShl(smi_index,
+                    IntPtrConstant(kPointerSizeLog2 - kSmiShiftBits));
+  Node* offset = IntPtrAdd(scaled_index, header_size);
+  return Load(MachineType::AnyTagged(), object, offset);
 }
 
 Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
                                                             int index) {
-  Node* offset = raw_assembler_->Int32Constant(
-      FixedArray::kHeaderSize - kHeapObjectTag + index * kPointerSize);
+  Node* offset = IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag +
+                                index * kPointerSize);
   return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
 }
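
A side note on the scaled-index logic above: because a Smi keeps its integer value in the upper bits of the tagged word, the element byte offset can be derived from the tagged index with a single word-sized shift in one direction or the other. A standalone C++ sketch of that arithmetic (plain integers, not the CodeStubAssembler API; the 64-bit Smi layout and header constants are assumed here, not taken from this patch):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed 64-bit Smi layout: the small integer lives in the upper 32 bits
  // of the tagged word, and the low tag bit of a Smi is 0.
  const int64_t kSmiShiftSize = 31, kSmiTagSize = 1;
  const int64_t kSmiShiftBits = kSmiShiftSize + kSmiTagSize;  // 32
  const int64_t kPointerSizeLog2 = 3;                         // 8-byte pointers
  // Hypothetical stand-ins for FixedArray::kHeaderSize and kHeapObjectTag.
  const int64_t kHeaderSize = 16, kHeapObjectTag = 1;

  int64_t value = 5;
  int64_t smi_index = value << kSmiShiftBits;  // the tagged Smi for 5

  // Mirror of the new scaled_index computation: one word-sized arithmetic
  // shift right (WordSar) when the Smi shift exceeds the pointer-size log,
  // otherwise one word-sized shift left (WordShl).
  int64_t scaled_index =
      (kSmiShiftBits > kPointerSizeLog2)
          ? smi_index >> (kSmiShiftBits - kPointerSizeLog2)
          : smi_index << (kPointerSizeLog2 - kSmiShiftBits);
  int64_t offset = scaled_index + kHeaderSize - kHeapObjectTag;
  std::printf("element %" PRId64 " lives at byte offset %" PRId64 "\n",
              value, offset);  // 5 * 8 + 16 - 1 = 55
  return 0;
}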
@@ -192,7 +196,7 @@ Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
   if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
     Handle<Object> root = isolate()->heap()->root_handle(root_index);
     if (root->IsSmi()) {
-      return Int32Constant(Handle<Smi>::cast(root)->value());
+      return SmiConstant(Smi::cast(*root));
     } else {
       return HeapConstant(Handle<HeapObject>::cast(root));
     }
@@ -149,6 +149,7 @@ class CodeStubAssembler {
   // Conversions
   Node* ChangeInt32ToInt64(Node* value);
+  Node* ChangeUint32ToUint64(Node* value);
 
   // Projections
   Node* Projection(int index, Node* value);
@@ -79,7 +79,7 @@ Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
 Node* InterpreterAssembler::LoadRegister(int offset) {
   return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
-              Int32Constant(offset));
+              IntPtrConstant(offset));
 }
 
 Node* InterpreterAssembler::LoadRegister(Register reg) {
@@ -97,7 +97,7 @@ Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
 Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
   return StoreNoWriteBarrier(MachineRepresentation::kTagged,
-                             RegisterFileRawPointer(), Int32Constant(offset),
+                             RegisterFileRawPointer(), IntPtrConstant(offset),
                              value);
 }
@@ -113,7 +113,7 @@ Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
 Node* InterpreterAssembler::NextRegister(Node* reg_index) {
   // Register indexes are negative, so the next index is minus one.
-  return IntPtrAdd(reg_index, Int32Constant(-1));
+  return IntPtrAdd(reg_index, IntPtrConstant(-1));
 }
 
 Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
@@ -122,7 +122,7 @@ Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
            Bytecodes::GetOperandSize(bytecode_, operand_index));
   return Load(
       MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+      IntPtrAdd(BytecodeOffset(), IntPtrConstant(Bytecodes::GetOperandOffset(
                                       bytecode_, operand_index))));
 }
@@ -132,7 +132,7 @@ Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
            Bytecodes::GetOperandSize(bytecode_, operand_index));
   Node* load = Load(
       MachineType::Int8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+      IntPtrAdd(BytecodeOffset(), IntPtrConstant(Bytecodes::GetOperandOffset(
                                       bytecode_, operand_index))));
   // Ensure that we sign extend to full pointer size
   if (kPointerSize == 8) {
@@ -148,15 +148,16 @@ Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
   if (TargetSupportsUnalignedAccess()) {
     return Load(
         MachineType::Uint16(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+        IntPtrAdd(BytecodeOffset(), IntPtrConstant(Bytecodes::GetOperandOffset(
                                         bytecode_, operand_index))));
   } else {
     int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
-    Node* first_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-                            IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
+    Node* first_byte =
+        Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+             IntPtrAdd(BytecodeOffset(), IntPtrConstant(offset)));
     Node* second_byte =
         Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-             IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+             IntPtrAdd(BytecodeOffset(), IntPtrConstant(offset + 1)));
 #if V8_TARGET_LITTLE_ENDIAN
     return WordOr(WordShl(second_byte, kBitsPerByte), first_byte);
 #elif V8_TARGET_BIG_ENDIAN
@@ -176,14 +177,14 @@ Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
   Node* load;
   if (TargetSupportsUnalignedAccess()) {
     load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
-                IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
   } else {
 #if V8_TARGET_LITTLE_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset + 1);
-    Node* lo_byte_offset = Int32Constant(operand_offset);
+    Node* hi_byte_offset = IntPtrConstant(operand_offset + 1);
+    Node* lo_byte_offset = IntPtrConstant(operand_offset);
 #elif V8_TARGET_BIG_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset);
-    Node* lo_byte_offset = Int32Constant(operand_offset + 1);
+    Node* hi_byte_offset = IntPtrConstant(operand_offset);
+    Node* lo_byte_offset = IntPtrConstant(operand_offset + 1);
 #else
 #error "Unknown Architecture"
 #endif
@@ -264,14 +265,6 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
   return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
 }
 
-Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
-                                                  int index) {
-  Node* entry_offset =
-      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
-                WordShl(Int32Constant(index), kPointerSizeLog2));
-  return Load(MachineType::AnyTagged(), fixed_array, entry_offset);
-}
-
 Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
   return Load(MachineType::AnyTagged(), object,
               IntPtrConstant(offset - kHeapObjectTag));
@@ -285,7 +278,7 @@ Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
 Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
   Node* offset =
       IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
   return Load(MachineType::AnyTagged(), context, offset);
 }
@@ -293,7 +286,7 @@ Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
                                              Node* value) {
   Node* offset =
       IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+                IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
   return Store(MachineRepresentation::kTagged, context, offset, value);
 }
@@ -368,7 +361,7 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
   Node* function = IntPtrAdd(function_table, function_offset);
   Node* function_entry =
       Load(MachineType::Pointer(), function,
-           Int32Constant(offsetof(Runtime::Function, entry)));
+           IntPtrConstant(offsetof(Runtime::Function, entry)));
 
   return CallStub(callable.descriptor(), code_target, context, arg_count,
                   first_arg, function_entry, result_size);
@@ -405,7 +398,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
 }
 
 Node* InterpreterAssembler::Advance(int delta) {
-  return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+  return IntPtrAdd(BytecodeOffset(), IntPtrConstant(delta));
 }
 
 Node* InterpreterAssembler::Advance(Node* delta) {
@@ -444,12 +437,15 @@ void InterpreterAssembler::Dispatch() {
 void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
   Node* target_bytecode = Load(
       MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
+  if (kPointerSize == 8) {
+    target_bytecode = ChangeUint32ToUint64(target_bytecode);
+  }
 
   // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
   // from code object on every dispatch.
   Node* target_code_object =
       Load(MachineType::Pointer(), DispatchTableRawPointer(),
-           Word32Shl(target_bytecode, Int32Constant(kPointerSizeLog2)));
+           WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
 
   DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
 }
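
A brief note on the DispatchTo change above: the bytecode is loaded as a uint8, so on a 64-bit target it must be zero-extended to pointer width before the word-sized shift that scales it into a dispatch-table offset. A standalone C++ sketch of that arithmetic (plain integers, not the assembler API; the bytecode value is hypothetical):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kPointerSizeLog2 = 3;  // assumed 64-bit target
  uint8_t target_bytecode = 0x2a;        // hypothetical bytecode value

  // Widen the byte to pointer width first (the ChangeUint32ToUint64 step),
  // then scale it with a word-sized shift to index the dispatch table.
  uintptr_t wide = static_cast<uintptr_t>(target_bytecode);
  uintptr_t table_offset = wide << kPointerSizeLog2;
  std::printf("dispatch table offset: %" PRIuPTR "\n", table_offset);
  return 0;
}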
@@ -62,9 +62,6 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
   // Load constant at |index| in the constant pool.
   compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
 
-  // Load an element from a fixed array on the heap.
-  compiler::Node* LoadFixedArrayElement(compiler::Node* fixed_array, int index);
-
   // Load a field from an object on the heap.
   compiler::Node* LoadObjectField(compiler::Node* object, int offset);