Commit b4b9fc29 authored by bmeurer's avatar bmeurer Committed by Commit bot

[compiler] Fix a bunch of wrong word size assumptions.

Operations on word-sized data must be word-sized, not word32.
Currently this only generates worse code, but in the future it
might even generate wrong code, so we had better get this right
from the beginning.

R=yangguo@chromium.org

Review URL: https://codereview.chromium.org/1748953004

Cr-Commit-Position: refs/heads/master@{#34378}
parent 74d6c64c
...@@ -119,7 +119,7 @@ Node* CodeStubAssembler::LoadStackPointer() { ...@@ -119,7 +119,7 @@ Node* CodeStubAssembler::LoadStackPointer() {
} }
Node* CodeStubAssembler::SmiShiftBitsConstant() { Node* CodeStubAssembler::SmiShiftBitsConstant() {
return Int32Constant(kSmiShiftSize + kSmiTagSize); return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
} }
...@@ -147,13 +147,17 @@ Node* CodeStubAssembler::ChangeInt32ToInt64(Node* value) { ...@@ -147,13 +147,17 @@ Node* CodeStubAssembler::ChangeInt32ToInt64(Node* value) {
return raw_assembler_->ChangeInt32ToInt64(value); return raw_assembler_->ChangeInt32ToInt64(value);
} }
Node* CodeStubAssembler::ChangeUint32ToUint64(Node* value) {
return raw_assembler_->ChangeUint32ToUint64(value);
}
Node* CodeStubAssembler::WordShl(Node* value, int shift) { Node* CodeStubAssembler::WordShl(Node* value, int shift) {
return raw_assembler_->WordShl(value, Int32Constant(shift)); return raw_assembler_->WordShl(value, IntPtrConstant(shift));
} }
Node* CodeStubAssembler::WordIsSmi(Node* a) { Node* CodeStubAssembler::WordIsSmi(Node* a) {
return WordEqual(raw_assembler_->WordAnd(a, Int32Constant(kSmiTagMask)), return WordEqual(raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask)),
Int32Constant(0)); IntPtrConstant(0));
} }
Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset) { Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset) {
...@@ -169,22 +173,22 @@ Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) { ...@@ -169,22 +173,22 @@ Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object, Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
Node* smi_index, Node* smi_index,
int additional_offset) { int additional_offset) {
Node* header_size = raw_assembler_->Int32Constant( int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
additional_offset + FixedArray::kHeaderSize - kHeapObjectTag); Node* header_size = IntPtrConstant(additional_offset +
FixedArray::kHeaderSize - kHeapObjectTag);
Node* scaled_index = Node* scaled_index =
(kSmiShiftSize == 0) (kSmiShiftBits > kPointerSizeLog2)
? raw_assembler_->Word32Shl( ? WordSar(smi_index, IntPtrConstant(kSmiShiftBits - kPointerSizeLog2))
smi_index, Int32Constant(kPointerSizeLog2 - kSmiTagSize)) : WordShl(smi_index,
: raw_assembler_->Word32Shl(SmiUntag(smi_index), IntPtrConstant(kPointerSizeLog2 - kSmiShiftBits));
Int32Constant(kPointerSizeLog2)); Node* offset = IntPtrAdd(scaled_index, header_size);
Node* offset = raw_assembler_->Int32Add(scaled_index, header_size); return Load(MachineType::AnyTagged(), object, offset);
return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
} }
Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object, Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
int index) { int index) {
Node* offset = raw_assembler_->Int32Constant( Node* offset = IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag +
FixedArray::kHeaderSize - kHeapObjectTag + index * kPointerSize); index * kPointerSize);
return raw_assembler_->Load(MachineType::AnyTagged(), object, offset); return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
} }
...@@ -192,7 +196,7 @@ Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) { ...@@ -192,7 +196,7 @@ Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) { if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
Handle<Object> root = isolate()->heap()->root_handle(root_index); Handle<Object> root = isolate()->heap()->root_handle(root_index);
if (root->IsSmi()) { if (root->IsSmi()) {
return Int32Constant(Handle<Smi>::cast(root)->value()); return SmiConstant(Smi::cast(*root));
} else { } else {
return HeapConstant(Handle<HeapObject>::cast(root)); return HeapConstant(Handle<HeapObject>::cast(root));
} }
......
...@@ -149,6 +149,7 @@ class CodeStubAssembler { ...@@ -149,6 +149,7 @@ class CodeStubAssembler {
// Conversions // Conversions
Node* ChangeInt32ToInt64(Node* value); Node* ChangeInt32ToInt64(Node* value);
Node* ChangeUint32ToUint64(Node* value);
// Projections // Projections
Node* Projection(int index, Node* value); Node* Projection(int index, Node* value);
......
...@@ -79,7 +79,7 @@ Node* InterpreterAssembler::RegisterLocation(Node* reg_index) { ...@@ -79,7 +79,7 @@ Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
Node* InterpreterAssembler::LoadRegister(int offset) { Node* InterpreterAssembler::LoadRegister(int offset) {
return Load(MachineType::AnyTagged(), RegisterFileRawPointer(), return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
Int32Constant(offset)); IntPtrConstant(offset));
} }
Node* InterpreterAssembler::LoadRegister(Register reg) { Node* InterpreterAssembler::LoadRegister(Register reg) {
...@@ -97,7 +97,7 @@ Node* InterpreterAssembler::LoadRegister(Node* reg_index) { ...@@ -97,7 +97,7 @@ Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
Node* InterpreterAssembler::StoreRegister(Node* value, int offset) { Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged, return StoreNoWriteBarrier(MachineRepresentation::kTagged,
RegisterFileRawPointer(), Int32Constant(offset), RegisterFileRawPointer(), IntPtrConstant(offset),
value); value);
} }
...@@ -113,7 +113,7 @@ Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) { ...@@ -113,7 +113,7 @@ Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
Node* InterpreterAssembler::NextRegister(Node* reg_index) { Node* InterpreterAssembler::NextRegister(Node* reg_index) {
// Register indexes are negative, so the next index is minus one. // Register indexes are negative, so the next index is minus one.
return IntPtrAdd(reg_index, Int32Constant(-1)); return IntPtrAdd(reg_index, IntPtrConstant(-1));
} }
Node* InterpreterAssembler::BytecodeOperand(int operand_index) { Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
...@@ -122,7 +122,7 @@ Node* InterpreterAssembler::BytecodeOperand(int operand_index) { ...@@ -122,7 +122,7 @@ Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
Bytecodes::GetOperandSize(bytecode_, operand_index)); Bytecodes::GetOperandSize(bytecode_, operand_index));
return Load( return Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(), MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset( IntPtrAdd(BytecodeOffset(), IntPtrConstant(Bytecodes::GetOperandOffset(
bytecode_, operand_index)))); bytecode_, operand_index))));
} }
...@@ -132,7 +132,7 @@ Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) { ...@@ -132,7 +132,7 @@ Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
Bytecodes::GetOperandSize(bytecode_, operand_index)); Bytecodes::GetOperandSize(bytecode_, operand_index));
Node* load = Load( Node* load = Load(
MachineType::Int8(), BytecodeArrayTaggedPointer(), MachineType::Int8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset( IntPtrAdd(BytecodeOffset(), IntPtrConstant(Bytecodes::GetOperandOffset(
bytecode_, operand_index)))); bytecode_, operand_index))));
// Ensure that we sign extend to full pointer size // Ensure that we sign extend to full pointer size
if (kPointerSize == 8) { if (kPointerSize == 8) {
...@@ -148,15 +148,16 @@ Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) { ...@@ -148,15 +148,16 @@ Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
if (TargetSupportsUnalignedAccess()) { if (TargetSupportsUnalignedAccess()) {
return Load( return Load(
MachineType::Uint16(), BytecodeArrayTaggedPointer(), MachineType::Uint16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset( IntPtrAdd(BytecodeOffset(), IntPtrConstant(Bytecodes::GetOperandOffset(
bytecode_, operand_index)))); bytecode_, operand_index))));
} else { } else {
int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index); int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
Node* first_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), Node* first_byte =
IntPtrAdd(BytecodeOffset(), Int32Constant(offset))); Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(offset)));
Node* second_byte = Node* second_byte =
Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1))); IntPtrAdd(BytecodeOffset(), IntPtrConstant(offset + 1)));
#if V8_TARGET_LITTLE_ENDIAN #if V8_TARGET_LITTLE_ENDIAN
return WordOr(WordShl(second_byte, kBitsPerByte), first_byte); return WordOr(WordShl(second_byte, kBitsPerByte), first_byte);
#elif V8_TARGET_BIG_ENDIAN #elif V8_TARGET_BIG_ENDIAN
...@@ -176,14 +177,14 @@ Node* InterpreterAssembler::BytecodeOperandShortSignExtended( ...@@ -176,14 +177,14 @@ Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
Node* load; Node* load;
if (TargetSupportsUnalignedAccess()) { if (TargetSupportsUnalignedAccess()) {
load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(), load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset))); IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else { } else {
#if V8_TARGET_LITTLE_ENDIAN #if V8_TARGET_LITTLE_ENDIAN
Node* hi_byte_offset = Int32Constant(operand_offset + 1); Node* hi_byte_offset = IntPtrConstant(operand_offset + 1);
Node* lo_byte_offset = Int32Constant(operand_offset); Node* lo_byte_offset = IntPtrConstant(operand_offset);
#elif V8_TARGET_BIG_ENDIAN #elif V8_TARGET_BIG_ENDIAN
Node* hi_byte_offset = Int32Constant(operand_offset); Node* hi_byte_offset = IntPtrConstant(operand_offset);
Node* lo_byte_offset = Int32Constant(operand_offset + 1); Node* lo_byte_offset = IntPtrConstant(operand_offset + 1);
#else #else
#error "Unknown Architecture" #error "Unknown Architecture"
#endif #endif
...@@ -264,14 +265,6 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) { ...@@ -264,14 +265,6 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
return Load(MachineType::AnyTagged(), constant_pool, entry_offset); return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
} }
Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
int index) {
Node* entry_offset =
IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
WordShl(Int32Constant(index), kPointerSizeLog2));
return Load(MachineType::AnyTagged(), fixed_array, entry_offset);
}
Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) { Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
return Load(MachineType::AnyTagged(), object, return Load(MachineType::AnyTagged(), object,
IntPtrConstant(offset - kHeapObjectTag)); IntPtrConstant(offset - kHeapObjectTag));
...@@ -285,7 +278,7 @@ Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) { ...@@ -285,7 +278,7 @@ Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) { Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
Node* offset = Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2), IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
Int32Constant(Context::kHeaderSize - kHeapObjectTag)); IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
return Load(MachineType::AnyTagged(), context, offset); return Load(MachineType::AnyTagged(), context, offset);
} }
...@@ -293,7 +286,7 @@ Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index, ...@@ -293,7 +286,7 @@ Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
Node* value) { Node* value) {
Node* offset = Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2), IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
Int32Constant(Context::kHeaderSize - kHeapObjectTag)); IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
return Store(MachineRepresentation::kTagged, context, offset, value); return Store(MachineRepresentation::kTagged, context, offset, value);
} }
...@@ -368,7 +361,7 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context, ...@@ -368,7 +361,7 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
Node* function = IntPtrAdd(function_table, function_offset); Node* function = IntPtrAdd(function_table, function_offset);
Node* function_entry = Node* function_entry =
Load(MachineType::Pointer(), function, Load(MachineType::Pointer(), function,
Int32Constant(offsetof(Runtime::Function, entry))); IntPtrConstant(offsetof(Runtime::Function, entry)));
return CallStub(callable.descriptor(), code_target, context, arg_count, return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function_entry, result_size); first_arg, function_entry, result_size);
...@@ -405,7 +398,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight) { ...@@ -405,7 +398,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
} }
Node* InterpreterAssembler::Advance(int delta) { Node* InterpreterAssembler::Advance(int delta) {
return IntPtrAdd(BytecodeOffset(), Int32Constant(delta)); return IntPtrAdd(BytecodeOffset(), IntPtrConstant(delta));
} }
Node* InterpreterAssembler::Advance(Node* delta) { Node* InterpreterAssembler::Advance(Node* delta) {
...@@ -444,12 +437,15 @@ void InterpreterAssembler::Dispatch() { ...@@ -444,12 +437,15 @@ void InterpreterAssembler::Dispatch() {
void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) { void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
Node* target_bytecode = Load( Node* target_bytecode = Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset); MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
if (kPointerSize == 8) {
target_bytecode = ChangeUint32ToUint64(target_bytecode);
}
// TODO(rmcilroy): Create a code target dispatch table to avoid conversion // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
// from code object on every dispatch. // from code object on every dispatch.
Node* target_code_object = Node* target_code_object =
Load(MachineType::Pointer(), DispatchTableRawPointer(), Load(MachineType::Pointer(), DispatchTableRawPointer(),
Word32Shl(target_bytecode, Int32Constant(kPointerSizeLog2))); WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
DispatchToBytecodeHandler(target_code_object, new_bytecode_offset); DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
} }
......
...@@ -62,9 +62,6 @@ class InterpreterAssembler : public compiler::CodeStubAssembler { ...@@ -62,9 +62,6 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
// Load constant at |index| in the constant pool. // Load constant at |index| in the constant pool.
compiler::Node* LoadConstantPoolEntry(compiler::Node* index); compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
// Load an element from a fixed array on the heap.
compiler::Node* LoadFixedArrayElement(compiler::Node* fixed_array, int index);
// Load a field from an object on the heap. // Load a field from an object on the heap.
compiler::Node* LoadObjectField(compiler::Node* object, int offset); compiler::Node* LoadObjectField(compiler::Node* object, int offset);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment