Commit c516d4f0 authored by Benedikt Meurer's avatar Benedikt Meurer

[turbofan] Add checked load/store operators.

TEST=mjsunit,cctest,unittests
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/763963002

Cr-Commit-Position: refs/heads/master@{#25591}
parent ce524675
......@@ -2485,6 +2485,12 @@ void Assembler::vstm(BlockAddrMode am,
}
// Loads the 32-bit float immediate |imm| into VFP register |dst| by
// materializing its raw bit pattern in ip and moving it into the S-register.
void Assembler::vmov(const SwVfpRegister dst, float imm) {
// bit_cast keeps the exact IEEE-754 single-precision bit pattern.
mov(ip, Operand(bit_cast<int32_t>(imm)));
vmov(dst, ip);
}
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
......
......@@ -1180,6 +1180,7 @@ class Assembler : public AssemblerBase {
SwVfpRegister last,
Condition cond = al);
void vmov(const SwVfpRegister dst, float imm);
void vmov(const DwVfpRegister dst,
double imm,
const Register scratch = no_reg);
......
......@@ -74,8 +74,7 @@ FieldAccess AccessBuilder::ForValue() {
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
return {kNoBoundsCheck, kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
kMachAnyTagged};
return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
}
......@@ -86,33 +85,25 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
int header_size = is_external ? 0 : FixedTypedArrayBase::kDataOffset;
switch (type) {
case kExternalInt8Array:
return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
kMachInt8};
return {taggedness, header_size, Type::Signed32(), kMachInt8};
case kExternalUint8Array:
case kExternalUint8ClampedArray:
return {kTypedArrayBoundsCheck, taggedness, header_size,
Type::Unsigned32(), kMachUint8};
return {taggedness, header_size, Type::Unsigned32(), kMachUint8};
case kExternalInt16Array:
return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
kMachInt16};
return {taggedness, header_size, Type::Signed32(), kMachInt16};
case kExternalUint16Array:
return {kTypedArrayBoundsCheck, taggedness, header_size,
Type::Unsigned32(), kMachUint16};
return {taggedness, header_size, Type::Unsigned32(), kMachUint16};
case kExternalInt32Array:
return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
kMachInt32};
return {taggedness, header_size, Type::Signed32(), kMachInt32};
case kExternalUint32Array:
return {kTypedArrayBoundsCheck, taggedness, header_size,
Type::Unsigned32(), kMachUint32};
return {taggedness, header_size, Type::Unsigned32(), kMachUint32};
case kExternalFloat32Array:
return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Number(),
kMachFloat32};
return {taggedness, header_size, Type::Number(), kMachFloat32};
case kExternalFloat64Array:
return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Number(),
kMachFloat64};
return {taggedness, header_size, Type::Number(), kMachFloat64};
}
UNREACHABLE();
return {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::None(), kMachNone};
return {kUntaggedBase, 0, Type::None(), kMachNone};
}
} // namespace compiler
......
......@@ -142,9 +142,8 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(r0);
}
MemOperand InputOffset() {
int index = 0;
return InputOffset(&index);
MemOperand InputOffset(int first_index = 0) {
return InputOffset(&first_index);
}
MemOperand ToMemOperand(InstructionOperand* op) const {
......@@ -159,6 +158,112 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
};
namespace {
// Out-of-line fallback for a checked float32 load whose index failed the
// bounds check: produces a quiet NaN instead of performing the load.
class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
public:
OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL {
__ vmov(result_, std::numeric_limits<float>::quiet_NaN());
}
private:
SwVfpRegister const result_;  // Destination register for the NaN.
};
// Out-of-line fallback for a checked float64 load: yields a quiet NaN.
class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
public:
OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL {
// kScratchReg is needed because the double vmov goes via a core register.
__ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
}
private:
DwVfpRegister const result_;  // Destination register for the NaN.
};
// Out-of-line fallback for a checked integer load: yields zero.
class OutOfLineLoadInteger FINAL : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL { __ mov(result_, Operand::Zero()); }
private:
Register const result_;  // Destination register for the zero.
};
} // namespace
// Checked float load: compare offset (input 0) against length (input 1,
// register or immediate); if offset >= length (unsigned 'hs'), branch to
// out-of-line code that produces NaN, otherwise vldr from the address formed
// by inputs 2+. Comments are kept outside the macros because '//' inside a
// backslash-continued macro would swallow the remaining lines.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
do { \
auto result = i.OutputFloat##width##Register(); \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
__ b(hs, ool->entry()); \
__ vldr(result, i.InputOffset(2)); \
__ bind(ool->exit()); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
// Checked integer load: same bounds check; the out-of-line path zeroes the
// result register instead of loading.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
__ b(hs, ool->entry()); \
__ asm_instr(result, i.InputOffset(2)); \
__ bind(ool->exit()); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
// Checked float store: no branch needed — the vstr is predicated on 'lo'
// (offset < length) using ARM conditional execution, so an out-of-bounds
// store simply does nothing.
#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
do { \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
auto value = i.InputFloat##width##Register(2); \
__ vstr(value, i.InputOffset(3), lo); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
// Checked integer store: same predicated-store scheme as the float variant.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
auto value = i.InputRegister(2); \
__ asm_instr(value, i.InputOffset(3), lo); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ArmOperandConverter i(this, instr);
......@@ -535,6 +640,42 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
break;
case kCheckedLoadUint8:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
break;
case kCheckedLoadInt16:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
break;
case kCheckedLoadUint16:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
break;
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(32);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(64);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(strb);
break;
case kCheckedStoreWord16:
ASSEMBLE_CHECKED_STORE_INTEGER(strh);
break;
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(str);
break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(32);
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
}
}
......@@ -828,21 +969,20 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
SwVfpRegister dst = destination->IsDoubleRegister()
? g.ToFloat32Register(destination)
: kScratchDoubleReg.low();
// TODO(turbofan): Can we do better here?
__ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ vmov(dst, ip);
if (destination->IsDoubleStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
MemOperand dst = g.ToMemOperand(destination);
__ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ str(ip, dst);
} else {
SwVfpRegister dst = g.ToFloat32Register(destination);
__ vmov(dst, src.ToFloat32());
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
DwVfpRegister dst = destination->IsDoubleRegister()
? g.ToFloat64Register(destination)
: kScratchDoubleReg;
__ vmov(dst, src.ToFloat64());
__ vmov(dst, src.ToFloat64(), kScratchReg);
if (destination->IsDoubleStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
}
......
......@@ -350,6 +350,82 @@ void InstructionSelector::VisitStore(Node* node) {
}
// Selects an ARM checked-load instruction: picks the opcode from the access
// representation/type, then emits it with operands
// (offset, length, buffer, offset) — offset appears twice, once for the
// bounds check and once as part of the Offset_RR address.
void InstructionSelector::VisitCheckedLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
ArmOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
// Sub-word loads need sign- vs. zero-extension per the static type.
opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
case kRepWord16:
opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
case kRepFloat64:
opcode = kCheckedLoadFloat64;
break;
default:
UNREACHABLE();
return;
}
InstructionOperand* offset_operand = g.UseRegister(offset);
// The length may be encoded as a cmp immediate; otherwise use a register.
InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
? g.UseImmediate(length)
: g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer), offset_operand);
}
// Selects an ARM checked-store instruction. Mirrors VisitCheckedLoad but has
// no output (nullptr) and an extra value input before buffer/offset.
void InstructionSelector::VisitCheckedStore(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
ArmOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = kCheckedStoreWord8;
break;
case kRepWord16:
opcode = kCheckedStoreWord16;
break;
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
case kRepFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
UNREACHABLE();
return;
}
InstructionOperand* offset_operand = g.UseRegister(offset);
// The length may be encoded as a cmp immediate; otherwise use a register.
InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
? g.UseImmediate(length)
: g.UseRegister(length);
// Inputs: offset (bounds check), length, value, then buffer+offset address.
Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), nullptr,
offset_operand, length_operand, g.UseRegister(value),
g.UseRegister(buffer), offset_operand);
}
namespace {
void EmitBic(InstructionSelector* selector, Node* node, Node* left,
......
......@@ -24,6 +24,18 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
// Float32 operands use the S (single-precision) view of the D register.
DoubleRegister InputFloat32Register(int index) {
return InputDoubleRegister(index).S();
}
// Float64 operands use the D register directly.
DoubleRegister InputFloat64Register(int index) {
return InputDoubleRegister(index);
}
DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
Register InputRegister32(int index) {
return ToRegister(instr_->InputAt(index)).W();
}
......@@ -106,9 +118,8 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(no_reg);
}
MemOperand MemoryOperand() {
int index = 0;
return MemoryOperand(&index);
MemOperand MemoryOperand(int first_index = 0) {
return MemoryOperand(&first_index);
}
Operand ToOperand(InstructionOperand* op) {
......@@ -163,6 +174,100 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
};
namespace {
// Out-of-line fallback for a checked float32 load whose index failed the
// bounds check: produces a quiet NaN instead of performing the load.
class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
public:
OutOfLineLoadFloat32(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL {
__ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
}
private:
DoubleRegister const result_;  // Destination register for the NaN.
};
// Out-of-line fallback for a checked float64 load: yields a quiet NaN.
class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
public:
OutOfLineLoadFloat64(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL {
__ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
}
private:
DoubleRegister const result_;  // Destination register for the NaN.
};
// Out-of-line fallback for a checked integer load: yields zero.
class OutOfLineLoadInteger FINAL : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL { __ Mov(result_, 0); }
private:
Register const result_;  // Destination register for the zero.
};
} // namespace
// Checked float load: compare 32-bit offset (input 0) against length
// (input 1); if offset >= length (unsigned 'hs'), branch to out-of-line code
// that produces NaN, otherwise Ldr from the address formed by inputs 2+.
// Comments stay outside the macros because '//' inside a backslash-continued
// macro would swallow the remaining lines.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
do { \
auto result = i.OutputFloat##width##Register(); \
auto offset = i.InputRegister32(0); \
auto length = i.InputOperand32(1); \
__ Cmp(offset, length); \
auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
__ B(hs, ool->entry()); \
__ Ldr(result, i.MemoryOperand(2)); \
__ Bind(ool->exit()); \
} while (0)
// Checked integer load: same bounds check; the out-of-line path zeroes the
// result register instead of loading.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister32(); \
auto offset = i.InputRegister32(0); \
auto length = i.InputOperand32(1); \
__ Cmp(offset, length); \
auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
__ B(hs, ool->entry()); \
__ asm_instr(result, i.MemoryOperand(2)); \
__ Bind(ool->exit()); \
} while (0)
// Checked float store: an out-of-bounds index simply skips the Str via a
// local label (AArch64 has no general predicated stores).
#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
do { \
auto offset = i.InputRegister32(0); \
auto length = i.InputOperand32(1); \
__ Cmp(offset, length); \
Label done; \
__ B(hs, &done); \
__ Str(i.InputFloat##width##Register(2), i.MemoryOperand(3)); \
__ Bind(&done); \
} while (0)
// Checked integer store: same skip-on-out-of-bounds scheme as floats.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto offset = i.InputRegister32(0); \
auto length = i.InputOperand32(1); \
__ Cmp(offset, length); \
Label done; \
__ B(hs, &done); \
__ asm_instr(i.InputRegister32(2), i.MemoryOperand(3)); \
__ Bind(&done); \
} while (0)
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
if (instr->InputAt(1)->IsRegister()) { \
......@@ -616,6 +721,42 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
break;
case kCheckedLoadUint8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
break;
case kCheckedLoadInt16:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
break;
case kCheckedLoadUint16:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
break;
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(32);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(64);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
break;
case kCheckedStoreWord16:
ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
break;
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(Str);
break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(32);
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
}
}
......@@ -770,7 +911,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = vc;
break;
}
__ bind(&check);
__ Bind(&check);
__ Cset(reg, cc);
__ Bind(&done);
}
......
......@@ -362,6 +362,76 @@ void InstructionSelector::VisitStore(Node* node) {
}
// Selects an ARM64 checked-load instruction: picks the opcode from the access
// representation/type, then emits it with operands
// (offset, length, buffer, offset) — offset appears twice, once for the
// bounds check and once as part of the MRR address.
void InstructionSelector::VisitCheckedLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
Arm64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
// Sub-word loads need sign- vs. zero-extension per the static type.
opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
case kRepWord16:
opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
case kRepFloat64:
opcode = kCheckedLoadFloat64;
break;
default:
UNREACHABLE();
return;
}
InstructionOperand* offset_operand = g.UseRegister(offset);
Emit(opcode | AddressingModeField::encode(kMode_MRR),
g.DefineAsRegister(node), offset_operand, g.UseRegister(length),
g.UseRegister(buffer), offset_operand);
}
// Selects an ARM64 checked-store instruction. Mirrors VisitCheckedLoad but
// has no output (nullptr) and an extra value input before buffer/offset.
void InstructionSelector::VisitCheckedStore(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
Arm64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = kCheckedStoreWord8;
break;
case kRepWord16:
opcode = kCheckedStoreWord16;
break;
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
case kRepFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
UNREACHABLE();
return;
}
InstructionOperand* offset_operand = g.UseRegister(offset);
// Inputs: offset (bounds check), length, value, then buffer+offset address.
Emit(opcode | AddressingModeField::encode(kMode_MRR), nullptr, offset_operand,
g.UseRegister(length), g.UseRegister(value), g.UseRegister(buffer),
offset_operand);
}
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
ArchOpcode opcode, bool left_can_cover,
......
......@@ -118,6 +118,27 @@ class InstructionOperandConverter {
};
// Generator for out-of-line code that is emitted after the main code is done.
// Base class for a slow-path snippet assembled after the main code. Each
// instance registers itself with the CodeGenerator on construction; subclasses
// implement Generate() to emit the actual out-of-line instructions.
class OutOfLineCode : public ZoneObject {
public:
explicit OutOfLineCode(CodeGenerator* gen);
virtual ~OutOfLineCode();
// Emits the out-of-line instruction sequence.
virtual void Generate() = 0;
// Label bound at the start of the out-of-line code (branch target).
Label* entry() { return &entry_; }
// Label in the main code to jump back to when done.
Label* exit() { return &exit_; }
MacroAssembler* masm() const { return masm_; }
// Next entry in the code generator's intrusive list of out-of-line code.
OutOfLineCode* next() const { return next_; }
private:
Label entry_;
Label exit_;
MacroAssembler* const masm_;
OutOfLineCode* const next_;
};
// TODO(dcarney): generify this on bleeding_edge and replace this call
// when merged.
static inline void FinishCode(MacroAssembler* masm) {
......
......@@ -27,7 +27,8 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
deoptimization_states_(code->zone()),
deoptimization_literals_(code->zone()),
translations_(code->zone()),
last_lazy_deopt_pc_(0) {
last_lazy_deopt_pc_(0),
ools_(nullptr) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
......@@ -71,6 +72,16 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
}
// Assemble all out-of-line code.
if (ools_) {
masm()->RecordComment("-- Out of line code --");
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
masm()->bind(ool->entry());
ool->Generate();
masm()->jmp(ool->exit());
}
}
FinishCode(masm());
// Ensure there is space for lazy deopt.
......@@ -555,6 +566,15 @@ void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
#endif // !V8_TURBOFAN_BACKEND
// Links this snippet onto the front of the code generator's singly-linked
// list of out-of-line code (gen->ools_), to be assembled after the main code.
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: masm_(gen->masm()), next_(gen->ools_) {
gen->ools_ = this;
}
OutOfLineCode::~OutOfLineCode() {}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -5,8 +5,6 @@
#ifndef V8_COMPILER_CODE_GENERATOR_H_
#define V8_COMPILER_CODE_GENERATOR_H_
#include <deque>
#include "src/compiler/gap-resolver.h"
#include "src/compiler/instruction.h"
#include "src/deoptimizer.h"
......@@ -17,7 +15,9 @@ namespace v8 {
namespace internal {
namespace compiler {
// Forward declarations.
class Linkage;
class OutOfLineCode;
struct BranchInfo {
FlagsCondition condition;
......@@ -129,6 +129,8 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
int pc_offset_;
};
friend class OutOfLineCode;
Frame* const frame_;
Linkage* const linkage_;
InstructionSequence* const code_;
......@@ -143,6 +145,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
ZoneDeque<Handle<Object> > deoptimization_literals_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
OutOfLineCode* ools_;
};
} // namespace compiler
......
......@@ -155,18 +155,111 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Operand(no_reg, 0);
}
Operand MemoryOperand() {
int first_input = 0;
Operand MemoryOperand(int first_input = 0) {
return MemoryOperand(&first_input);
}
};
static bool HasImmediateInput(Instruction* instr, int index) {
namespace {
// Returns true if the instruction's input at |index| is an immediate operand.
bool HasImmediateInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsImmediate();
}
// Out-of-line fallback for a checked integer load whose index failed the
// bounds check: zeroes the result register instead of loading.
class OutOfLineLoadInteger FINAL : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL { __ xor_(result_, result_); }
private:
Register const result_;  // Destination register for the zero.
};
// Out-of-line fallback for a checked float load: pcmpeqd of a register with
// itself sets all bits, which is a NaN bit pattern in either float width.
class OutOfLineLoadFloat FINAL : public OutOfLineCode {
public:
OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL { __ pcmpeqd(result_, result_); }
private:
XMMRegister const result_;  // Destination register for the NaN pattern.
};
} // namespace
// Checked float load: compare offset (input 0) against length (input 1,
// register or immediate); if offset >= length (unsigned above_equal), branch
// to out-of-line code that produces a NaN pattern, otherwise load from the
// memory operand formed by inputs 2+. Comments stay outside the macros
// because '//' inside a backslash-continued macro would swallow the rest.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
do { \
auto result = i.OutputDoubleRegister(); \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
__ j(above_equal, ool->entry()); \
__ asm_instr(result, i.MemoryOperand(2)); \
__ bind(ool->exit()); \
} while (false)
// Checked integer load: same bounds check; the out-of-line path zeroes the
// result register instead of loading.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
__ j(above_equal, ool->entry()); \
__ asm_instr(result, i.MemoryOperand(2)); \
__ bind(ool->exit()); \
} while (false)
// Checked float store: an out-of-bounds index skips the store via a short
// forward jump (no out-of-line code needed for stores).
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
Label done; \
__ j(above_equal, &done, Label::kNear); \
__ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
__ bind(&done); \
} while (false)
// Checked integer store: same skip scheme; the stored value (input 2) may be
// a register or an immediate.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
Label done; \
__ j(above_equal, &done, Label::kNear); \
if (instr->InputAt(2)->IsRegister()) { \
__ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
} else { \
__ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
} \
__ bind(&done); \
} while (false)
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
IA32OperandConverter i(this, instr);
......@@ -483,6 +576,42 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ RecordWrite(object, index, value, mode);
break;
}
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
case kCheckedLoadUint8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
break;
case kCheckedLoadInt16:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
break;
case kCheckedLoadUint16:
ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
break;
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
break;
case kCheckedStoreWord16:
ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
break;
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(mov);
break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(movss);
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
break;
}
}
......
......@@ -426,6 +426,142 @@ void InstructionSelector::VisitStore(Node* node) {
}
// Selects an IA-32 checked-load instruction. Besides the generic opcode
// selection, this tries to fold a covered 'offset = x + K' with a constant
// length L (where L > K) into the addressing mode, adjusting the length
// operand to L - K so the bounds check stays equivalent.
void InstructionSelector::VisitCheckedLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
IA32OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
// Sub-word loads need sign- vs. zero-extension per the static type.
opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
case kRepWord16:
opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
case kRepFloat64:
opcode = kCheckedLoadFloat64;
break;
default:
UNREACHABLE();
return;
}
// Fold a constant add in the offset into the addressing mode when the
// (constant) length proves the folded access stays in bounds.
if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
Int32Matcher mlength(length);
Int32BinopMatcher moffset(offset);
if (mlength.HasValue() && moffset.right().HasValue() &&
mlength.Value() > moffset.right().Value()) {
Int32Matcher mbuffer(buffer);
InstructionOperand* offset_operand = g.UseRegister(moffset.left().node());
InstructionOperand* length_operand =
g.TempImmediate(mlength.Value() - moffset.right().Value());
if (mbuffer.HasValue()) {
// Constant buffer: fold buffer + K into a single displacement.
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
offset_operand,
g.TempImmediate(mbuffer.Value() + moffset.right().Value()));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MR1I),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer), offset_operand,
g.UseImmediate(moffset.right().node()));
}
return;
}
}
// Generic case: no folding; length may still be an immediate.
InstructionOperand* offset_operand = g.UseRegister(offset);
InstructionOperand* length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
if (g.CanBeImmediate(buffer)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
offset_operand, g.UseImmediate(buffer));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MR1),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer), offset_operand);
}
}
// Selects an IA-32 checked-store instruction. Mirrors VisitCheckedLoad
// (including the constant-add folding into the addressing mode) but has no
// output and carries the stored value as an extra input.
void InstructionSelector::VisitCheckedStore(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
IA32OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = kCheckedStoreWord8;
break;
case kRepWord16:
opcode = kCheckedStoreWord16;
break;
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
case kRepFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
UNREACHABLE();
return;
}
// Byte stores on IA-32 require a byte-addressable register (eax..ebx).
InstructionOperand* value_operand =
g.CanBeImmediate(value)
? g.UseImmediate(value)
: ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
: g.UseRegister(value));
// Fold a constant add in the offset into the addressing mode when the
// (constant) length proves the folded access stays in bounds.
if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
Int32Matcher mbuffer(buffer);
Int32Matcher mlength(length);
Int32BinopMatcher moffset(offset);
if (mlength.HasValue() && moffset.right().HasValue() &&
mlength.Value() > moffset.right().Value()) {
InstructionOperand* offset_operand = g.UseRegister(moffset.left().node());
InstructionOperand* length_operand =
g.TempImmediate(mlength.Value() - moffset.right().Value());
if (mbuffer.HasValue()) {
// Constant buffer: fold buffer + K into a single displacement.
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
offset_operand, length_operand, value_operand, offset_operand,
g.TempImmediate(mbuffer.Value() + moffset.right().Value()));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MR1I), nullptr,
offset_operand, length_operand, value_operand,
g.UseRegister(buffer), offset_operand,
g.UseImmediate(moffset.right().node()));
}
return;
}
}
// Generic case: no folding; length may still be an immediate.
InstructionOperand* offset_operand = g.UseRegister(offset);
InstructionOperand* length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
if (g.CanBeImmediate(buffer)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
offset_operand, length_operand, value_operand, offset_operand,
g.UseImmediate(buffer));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
offset_operand, length_operand, value_operand, g.UseRegister(buffer),
offset_operand);
}
}
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
......
......@@ -39,6 +39,18 @@ namespace compiler {
V(ArchRet) \
V(ArchStackPointer) \
V(ArchTruncateDoubleToI) \
V(CheckedLoadInt8) \
V(CheckedLoadUint8) \
V(CheckedLoadInt16) \
V(CheckedLoadUint16) \
V(CheckedLoadWord32) \
V(CheckedLoadFloat32) \
V(CheckedLoadFloat64) \
V(CheckedStoreWord8) \
V(CheckedStoreWord16) \
V(CheckedStoreWord32) \
V(CheckedStoreFloat32) \
V(CheckedStoreFloat64) \
TARGET_ARCH_OPCODE_LIST(V)
enum ArchOpcode {
......
......@@ -132,6 +132,31 @@ Instruction* InstructionSelector::Emit(
}
// Convenience overload: emits an instruction with up to one output and
// exactly five inputs, forwarding to the array-based Emit.
Instruction* InstructionSelector::Emit(
InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
InstructionOperand* e, size_t temp_count, InstructionOperand** temps) {
// A NULL output means the instruction produces no value (e.g. a store).
size_t output_count = output == NULL ? 0 : 1;
InstructionOperand* inputs[] = {a, b, c, d, e};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
}
// Convenience overload: emits an instruction with up to one output and
// exactly six inputs, forwarding to the array-based Emit.
Instruction* InstructionSelector::Emit(
InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
InstructionOperand* e, InstructionOperand* f, size_t temp_count,
InstructionOperand** temps) {
// A NULL output means the instruction produces no value (e.g. a store).
size_t output_count = output == NULL ? 0 : 1;
InstructionOperand* inputs[] = {a, b, c, d, e, f};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
}
Instruction* InstructionSelector::Emit(
InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
size_t input_count, InstructionOperand** inputs, size_t temp_count,
......@@ -538,6 +563,10 @@ MachineType InstructionSelector::GetMachineType(Node* node) {
return OpParameter<LoadRepresentation>(node);
case IrOpcode::kStore:
return kMachNone;
case IrOpcode::kCheckedLoad:
return OpParameter<MachineType>(node);
case IrOpcode::kCheckedStore:
return kMachNone;
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
......@@ -808,6 +837,13 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
case IrOpcode::kLoadStackPointer:
return VisitLoadStackPointer(node);
case IrOpcode::kCheckedLoad: {
MachineType rep = OpParameter<MachineType>(node);
MarkAsRepresentation(rep, node);
return VisitCheckedLoad(node);
}
case IrOpcode::kCheckedStore:
return VisitCheckedStore(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
......
......@@ -59,6 +59,16 @@ class InstructionSelector FINAL {
InstructionOperand* a, InstructionOperand* b,
InstructionOperand* c, InstructionOperand* d,
size_t temp_count = 0, InstructionOperand* *temps = NULL);
Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
InstructionOperand* a, InstructionOperand* b,
InstructionOperand* c, InstructionOperand* d,
InstructionOperand* e, size_t temp_count = 0,
InstructionOperand* *temps = NULL);
Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
InstructionOperand* a, InstructionOperand* b,
InstructionOperand* c, InstructionOperand* d,
InstructionOperand* e, InstructionOperand* f,
size_t temp_count = 0, InstructionOperand* *temps = NULL);
Instruction* Emit(InstructionCode opcode, size_t output_count,
InstructionOperand** outputs, size_t input_count,
InstructionOperand** inputs, size_t temp_count = 0,
......
......@@ -86,6 +86,10 @@ class JSGraph : public ZoneObject {
return machine()->Is32() ? Int32Constant(static_cast<int32_t>(value))
: Int64Constant(static_cast<int64_t>(value));
}
// Creates an integer constant node holding the raw address of |value|,
// sized for the target pointer width via IntPtrConstant.
template <typename T>
Node* PointerConstant(T* value) {
return IntPtrConstant(bit_cast<intptr_t>(value));
}
// Creates a Float32Constant node, usually canonicalized.
Node* Float32Constant(float value);
......
......@@ -7,6 +7,7 @@
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/types.h"
......@@ -38,12 +39,14 @@ JSTypedLowering::JSTypedLowering(JSGraph* jsgraph)
one_range_ = Type::Range(one, one, zone());
Handle<Object> thirtyone = factory->NewNumber(31.0);
zero_thirtyone_range_ = Type::Range(zero, thirtyone, zone());
for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
Handle<Object> min = factory->NewNumber(kMinInt / (1 << k));
Handle<Object> max = factory->NewNumber(kMaxInt / (1 << k));
shifted_int32_ranges_[k] = Type::Range(min, max, zone());
}
}
JSTypedLowering::~JSTypedLowering() {}
Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
NodeProperties::ReplaceWithValue(old, node, node);
return Changed(node);
......@@ -674,27 +677,37 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
Type* base_type = NodeProperties::GetBounds(base).upper;
// TODO(mstarzinger): This lowering is not correct if:
// a) The typed array or it's buffer is neutered.
if (base_type->IsConstant() && key_type->Is(Type::Integral32()) &&
if (base_type->IsConstant() &&
base_type->AsConstant()->Value()->IsJSTypedArray()) {
// JSLoadProperty(typed-array, int32)
Handle<JSTypedArray> array =
Handle<JSTypedArray> const array =
Handle<JSTypedArray>::cast(base_type->AsConstant()->Value());
if (IsExternalArrayElementsKind(array->map()->elements_kind())) {
ExternalArrayType type = array->type();
double byte_length = array->byte_length()->Number();
if (byte_length <= kMaxInt) {
BufferAccess const access(array->type());
size_t const k = ElementSizeLog2Of(access.machine_type());
double const byte_length = array->byte_length()->Number();
CHECK_LT(k, arraysize(shifted_int32_ranges_));
if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
access.external_array_type() != kExternalUint8ClampedArray &&
key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
// JSLoadProperty(typed-array, int32)
Handle<ExternalArray> elements =
Handle<ExternalArray>::cast(handle(array->elements()));
Node* pointer = jsgraph()->IntPtrConstant(
bit_cast<intptr_t>(elements->external_pointer()));
Node* length = jsgraph()->Constant(array->length()->Number());
Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
Node* length = jsgraph()->Constant(byte_length);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Check if we can avoid the bounds check.
if (key_type->Min() >= 0 && key_type->Max() < array->length()->Number()) {
Node* load = graph()->NewNode(
simplified()->LoadElement(
AccessBuilder::ForTypedArrayElement(type, true)),
pointer, key, length, effect);
AccessBuilder::ForTypedArrayElement(array->type(), true)),
buffer, key, effect, control);
return ReplaceEagerly(node, load);
}
// Compute byte offset.
Node* offset = Word32Shl(key, static_cast<int>(k));
Node* load = graph()->NewNode(simplified()->LoadBuffer(access), buffer,
offset, length, effect, control);
return ReplaceEagerly(node, load);
}
}
return NoChange();
......@@ -707,56 +720,70 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 2);
Type* key_type = NodeProperties::GetBounds(key).upper;
Type* base_type = NodeProperties::GetBounds(base).upper;
Type* value_type = NodeProperties::GetBounds(value).upper;
// TODO(mstarzinger): This lowering is not correct if:
// a) The typed array or its buffer is neutered.
if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
if (base_type->IsConstant() &&
base_type->AsConstant()->Value()->IsJSTypedArray()) {
// JSStoreProperty(typed-array, int32, value)
Handle<JSTypedArray> array =
Handle<JSTypedArray> const array =
Handle<JSTypedArray>::cast(base_type->AsConstant()->Value());
if (IsExternalArrayElementsKind(array->map()->elements_kind())) {
ExternalArrayType type = array->type();
double byte_length = array->byte_length()->Number();
if (byte_length <= kMaxInt) {
BufferAccess const access(array->type());
size_t const k = ElementSizeLog2Of(access.machine_type());
double const byte_length = array->byte_length()->Number();
CHECK_LT(k, arraysize(shifted_int32_ranges_));
if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
access.external_array_type() != kExternalUint8ClampedArray &&
key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
// JSLoadProperty(typed-array, int32)
Handle<ExternalArray> elements =
Handle<ExternalArray>::cast(handle(array->elements()));
Node* pointer = jsgraph()->IntPtrConstant(
bit_cast<intptr_t>(elements->external_pointer()));
Node* length = jsgraph()->Constant(array->length()->Number());
Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
Node* length = jsgraph()->Constant(byte_length);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
Type* value_type = NodeProperties::GetBounds(value).upper;
// If the value input does not have the required type, insert the
// appropriate conversion.
// Convert to a number first.
if (!value_type->Is(Type::Number())) {
Reduction number_reduction = ReduceJSToNumberInput(value);
if (number_reduction.Changed()) {
value = number_reduction.replacement();
} else {
Node* context = NodeProperties::GetContextInput(node);
value = graph()->NewNode(javascript()->ToNumber(), value, context,
effect, control);
effect = value;
value = effect = graph()->NewNode(javascript()->ToNumber(), value,
context, effect, control);
}
}
// For integer-typed arrays, convert to the integer type.
if (access.type->Is(Type::Signed32()) &&
if (TypeOf(access.machine_type()) == kTypeInt32 &&
!value_type->Is(Type::Signed32())) {
value = graph()->NewNode(simplified()->NumberToInt32(), value);
} else if (access.type->Is(Type::Unsigned32()) &&
} else if (TypeOf(access.machine_type()) == kTypeUint32 &&
!value_type->Is(Type::Unsigned32())) {
value = graph()->NewNode(simplified()->NumberToUint32(), value);
}
Node* store =
graph()->NewNode(simplified()->StoreElement(access), pointer, key,
length, value, effect, control);
return ReplaceEagerly(node, store);
// Check if we can avoid the bounds check.
if (key_type->Min() >= 0 && key_type->Max() < array->length()->Number()) {
node->set_op(simplified()->StoreElement(
AccessBuilder::ForTypedArrayElement(array->type(), true)));
node->ReplaceInput(0, buffer);
DCHECK_EQ(key, node->InputAt(1));
node->ReplaceInput(2, value);
node->ReplaceInput(3, effect);
node->ReplaceInput(4, control);
node->TrimInputCount(5);
return Changed(node);
}
// Compute byte offset.
Node* offset = Word32Shl(key, static_cast<int>(k));
// Turn into a StoreBuffer operation.
node->set_op(simplified()->StoreBuffer(access));
node->ReplaceInput(0, buffer);
node->ReplaceInput(1, offset);
node->ReplaceInput(2, length);
node->ReplaceInput(3, value);
node->ReplaceInput(4, effect);
DCHECK_EQ(control, node->InputAt(5));
DCHECK_EQ(6, node->InputCount());
return Changed(node);
}
}
return NoChange();
......@@ -857,6 +884,13 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return NoChange();
}
// Builds a machine-level Word32Shl(lhs, rhs) node for a constant shift
// amount, folding the shift away entirely when rhs is zero (x << 0 => x).
Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
  if (rhs == 0) return lhs;  // No node needed for a zero shift.
  return graph()->NewNode(machine()->Word32Shl(), lhs,
                          jsgraph()->Int32Constant(rhs));
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -19,9 +19,9 @@ namespace compiler {
class JSTypedLowering FINAL : public Reducer {
public:
explicit JSTypedLowering(JSGraph* jsgraph);
virtual ~JSTypedLowering();
~JSTypedLowering() {}
virtual Reduction Reduce(Node* node) OVERRIDE;
Reduction Reduce(Node* node) OVERRIDE;
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph_->graph(); }
......@@ -51,6 +51,8 @@ class JSTypedLowering FINAL : public Reducer {
Reduction ReduceI32Shift(Node* node, bool left_signed,
const Operator* shift_op);
Node* Word32Shl(Node* const lhs, int32_t const rhs);
JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
CommonOperatorBuilder* common() { return jsgraph_->common(); }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
......@@ -61,6 +63,7 @@ class JSTypedLowering FINAL : public Reducer {
Type* zero_range_;
Type* one_range_;
Type* zero_thirtyone_range_;
Type* shifted_int32_ranges_[4];
};
} // namespace compiler
......
......@@ -119,38 +119,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kProjection:
return ReduceProjection(OpParameter<size_t>(node), node->InputAt(0));
case IrOpcode::kWord32And: {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.right().node()); // x & 0 => 0
if (m.right().Is(-1)) return Replace(m.left().node()); // x & -1 => x
if (m.IsFoldable()) { // K & K => K
return ReplaceInt32(m.left().Value() & m.right().Value());
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x & x => x
if (m.left().IsWord32And() && m.right().HasValue()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) { // (x & K) & K => x & K
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(
1, Int32Constant(m.right().Value() & mleft.right().Value()));
return Changed(node);
}
}
if (m.left().IsInt32Add() && m.right().IsNegativePowerOf2()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() &&
(mleft.right().Value() & m.right().Value()) ==
mleft.right().Value()) {
// (x + K) & K => (x & K) + K
return Replace(graph()->NewNode(
machine()->Int32Add(),
graph()->NewNode(machine()->Word32And(), mleft.left().node(),
m.right().node()),
mleft.right().node()));
}
}
break;
}
case IrOpcode::kWord32And:
return ReduceWord32And(node);
case IrOpcode::kWord32Or:
return ReduceWord32Or(node);
case IrOpcode::kWord32Xor: {
......@@ -168,28 +138,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
break;
}
case IrOpcode::kWord32Shl: {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
if (m.IsFoldable()) { // K << K => K
return ReplaceInt32(m.left().Value() << m.right().Value());
}
if (m.right().IsInRange(1, 31)) {
// (x >>> K) << K => x & ~(2^K - 1)
// (x >> K) << K => x & ~(2^K - 1)
if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(m.right().Value())) {
node->set_op(machine()->Word32And());
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(
1, Uint32Constant(~((1U << m.right().Value()) - 1U)));
return Changed(node);
}
}
}
return ReduceWord32Shifts(node);
}
case IrOpcode::kWord32Shl:
return ReduceWord32Shl(node);
case IrOpcode::kWord32Shr: {
Uint32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
......@@ -295,7 +245,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.right().IsPowerOf2()) { // x * 2^n => x << n
node->set_op(machine()->Word32Shl());
node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
return Changed(node);
Reduction reduction = ReduceWord32Shl(node);
return reduction.Changed() ? reduction : Changed(node);
}
break;
}
......@@ -763,7 +714,6 @@ Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
DCHECK((node->opcode() == IrOpcode::kWord32Shl) ||
(node->opcode() == IrOpcode::kWord32Shr) ||
(node->opcode() == IrOpcode::kWord32Sar));
if (machine()->Word32ShiftIsSafe()) {
// Remove the explicit 'and' with 0x1f if the shift provided by the machine
// instruction matches that required by JavaScript.
......@@ -780,9 +730,69 @@ Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
}
Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
DCHECK(node->opcode() == IrOpcode::kWord32Or);
// Strength-reduces Word32Shl nodes:
//   x << 0         => x
//   K << K'        => constant-folded result
//   (x >>> K) << K => x & ~(2^K - 1)
//   (x >> K)  << K => x & ~(2^K - 1)
// Anything not handled here falls through to the generic shift reductions.
Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
  DCHECK_EQ(IrOpcode::kWord32Shl, node->opcode());
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) return Replace(m.left().node());  // x << 0 => x
  if (m.IsFoldable()) {                                  // K << K => K
    return ReplaceInt32(m.left().Value() << m.right().Value());
  }
  if (m.right().IsInRange(1, 31)) {
    // (x >>> K) << K => x & ~(2^K - 1)
    // (x >> K) << K => x & ~(2^K - 1)
    if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().Is(m.right().Value())) {
        // Rewrite this node in place into the equivalent Word32And.
        node->set_op(machine()->Word32And());
        node->ReplaceInput(0, mleft.left().node());
        node->ReplaceInput(1,
                           Uint32Constant(~((1U << m.right().Value()) - 1U)));
        // Give the freshly formed Word32And a chance to reduce further;
        // report Changed(node) ourselves if it declines.
        Reduction reduction = ReduceWord32And(node);
        return reduction.Changed() ? reduction : Changed(node);
      }
    }
  }
  return ReduceWord32Shifts(node);
}
// Strength-reduces Word32And nodes:
//   x & 0       => 0
//   x & -1      => x
//   K & K'      => constant-folded result
//   x & x       => x
//   (x & K) & K' => x & (K & K')      (masks merged, then re-reduced)
//   (x + K) & M  => (x & M) + K       (M a negative power of two covering K)
Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
  DCHECK_EQ(IrOpcode::kWord32And, node->opcode());
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) return Replace(m.right().node());  // x & 0 => 0
  if (m.right().Is(-1)) return Replace(m.left().node());  // x & -1 => x
  if (m.IsFoldable()) {                                   // K & K => K
    return ReplaceInt32(m.left().Value() & m.right().Value());
  }
  if (m.LeftEqualsRight()) return Replace(m.left().node());  // x & x => x
  if (m.left().IsWord32And() && m.right().HasValue()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {  // (x & K) & K => x & K
      node->ReplaceInput(0, mleft.left().node());
      node->ReplaceInput(
          1, Int32Constant(m.right().Value() & mleft.right().Value()));
      // Recurse on the merged mask; terminates because each step strictly
      // shortens the chain of nested Word32And nodes.
      Reduction reduction = ReduceWord32And(node);
      return reduction.Changed() ? reduction : Changed(node);
    }
  }
  if (m.left().IsInt32Add() && m.right().IsNegativePowerOf2()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() &&
        (mleft.right().Value() & m.right().Value()) == mleft.right().Value()) {
      // (x + K) & K => (x & K) + K
      return Replace(graph()->NewNode(
          machine()->Int32Add(),
          graph()->NewNode(machine()->Word32And(), mleft.left().node(),
                           m.right().node()),
          mleft.right().node()));
    }
  }
  return NoChange();
}
Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
DCHECK_EQ(IrOpcode::kWord32Or, node->opcode());
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x | 0 => x
if (m.right().Is(-1)) return Replace(m.right().node()); // x | -1 => -1
......
......@@ -69,6 +69,8 @@ class MachineOperatorReducer FINAL : public Reducer {
Reduction ReduceStore(Node* node);
Reduction ReduceProjection(size_t index, Node* node);
Reduction ReduceWord32Shifts(Node* node);
Reduction ReduceWord32Shl(Node* node);
Reduction ReduceWord32And(Node* node);
Reduction ReduceWord32Or(Node* node);
Graph* graph() const;
......
......@@ -54,6 +54,18 @@ StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
}
// Extracts the MachineType parameter from a CheckedLoad operator.
CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const* op) {
  DCHECK_EQ(IrOpcode::kCheckedLoad, op->opcode());
  return OpParameter<CheckedLoadRepresentation>(op);
}
// Extracts the MachineType parameter from a CheckedStore operator.
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
  DCHECK_EQ(IrOpcode::kCheckedStore, op->opcode());
  return OpParameter<CheckedStoreRepresentation>(op);
}
#define PURE_OP_LIST(V) \
V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
......@@ -165,7 +177,15 @@ struct MachineOperatorGlobalCache {
IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, \
"Load", 2, 1, 1, 1, 1, 0, k##Type) {} \
}; \
Load##Type##Operator k##Load##Type;
struct CheckedLoad##Type##Operator FINAL \
: public Operator1<CheckedLoadRepresentation> { \
CheckedLoad##Type##Operator() \
: Operator1<CheckedLoadRepresentation>( \
IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite, \
"CheckedLoad", 3, 1, 1, 1, 1, 0, k##Type) {} \
}; \
Load##Type##Operator kLoad##Type; \
CheckedLoad##Type##Operator kCheckedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
......@@ -187,8 +207,16 @@ struct MachineOperatorGlobalCache {
Store##Type##FullWriteBarrier##Operator() \
: Store##Type##Operator(kFullWriteBarrier) {} \
}; \
Store##Type##NoWriteBarrier##Operator k##Store##Type##NoWriteBarrier; \
Store##Type##FullWriteBarrier##Operator k##Store##Type##FullWriteBarrier;
struct CheckedStore##Type##Operator FINAL \
: public Operator1<CheckedStoreRepresentation> { \
CheckedStore##Type##Operator() \
: Operator1<CheckedStoreRepresentation>( \
IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow, \
"CheckedStore", 4, 1, 1, 0, 1, 0, k##Type) {} \
}; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
CheckedStore##Type##Operator kCheckedStore##Type;
MACHINE_TYPE_LIST(STORE)
#undef STORE
};
......@@ -216,10 +244,9 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
switch (rep) {
#define LOAD(Type) \
case k##Type: \
return &cache_.k##Load##Type;
return &cache_.kLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
default:
break;
}
......@@ -252,6 +279,43 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, "Store", 3, 1,
1, 0, 1, 0, rep);
}
// Returns the CheckedLoad operator for the given machine type: the
// statically cached instance for every type in MACHINE_TYPE_LIST, or a
// freshly zone-allocated operator for any other representation.
const Operator* MachineOperatorBuilder::CheckedLoad(
    CheckedLoadRepresentation rep) {
  switch (rep) {
#define LOAD(Type) \
  case k##Type:    \
    return &cache_.kCheckedLoad##Type;
    MACHINE_TYPE_LIST(LOAD)
#undef LOAD
    default:
      break;
  }
  // Uncached.
  return new (zone_) Operator1<CheckedLoadRepresentation>(
      IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite,
      "CheckedLoad", 3, 1, 1, 1, 1, 0, rep);
}
// Returns the CheckedStore operator for the given machine type: the
// statically cached instance for every type in MACHINE_TYPE_LIST, or a
// freshly zone-allocated operator for any other representation.
const Operator* MachineOperatorBuilder::CheckedStore(
    CheckedStoreRepresentation rep) {
  switch (rep) {
#define STORE(Type) \
  case k##Type:     \
    return &cache_.kCheckedStore##Type;
    MACHINE_TYPE_LIST(STORE)
#undef STORE
    default:
      break;
  }
  // Uncached.
  return new (zone_) Operator1<CheckedStoreRepresentation>(
      IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow,
      "CheckedStore", 4, 1, 1, 0, 1, 0, rep);
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -53,6 +53,18 @@ std::ostream& operator<<(std::ostream&, StoreRepresentation);
StoreRepresentation const& StoreRepresentationOf(Operator const*);
// A CheckedLoad needs a MachineType.
typedef MachineType CheckedLoadRepresentation;
CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const*);
// A CheckedStore needs a MachineType.
typedef MachineType CheckedStoreRepresentation;
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
......@@ -174,6 +186,11 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
// Access to the machine stack.
const Operator* LoadStackPointer();
// checked-load heap, index, length
const Operator* CheckedLoad(CheckedLoadRepresentation);
// checked-store heap, index, length, value
const Operator* CheckedStore(CheckedStoreRepresentation);
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == kRepWord32; }
bool Is64() const { return word() == kRepWord64; }
......
......@@ -158,8 +158,10 @@
V(ChangeBoolToBit) \
V(ChangeBitToBool) \
V(LoadField) \
V(LoadBuffer) \
V(LoadElement) \
V(StoreField) \
V(StoreBuffer) \
V(StoreElement) \
V(ObjectIsSmi) \
V(ObjectIsNonNegativeSmi)
......@@ -232,7 +234,9 @@
V(Float64Ceil) \
V(Float64RoundTruncate) \
V(Float64RoundTiesAway) \
V(LoadStackPointer)
V(LoadStackPointer) \
V(CheckedLoad) \
V(CheckedStore)
#define VALUE_OP_LIST(V) \
COMMON_OP_LIST(V) \
......
This diff is collapsed.
......@@ -14,6 +14,10 @@ namespace v8 {
namespace internal {
namespace compiler {
// Forward declarations.
class RepresentationChanger;
class SimplifiedLowering FINAL {
public:
explicit SimplifiedLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
......@@ -26,7 +30,10 @@ class SimplifiedLowering FINAL {
void DoStoreField(Node* node);
// TODO(turbofan): The output_type can be removed once the result of the
// representation analysis is stored in the node bounds.
void DoLoadElement(Node* node, MachineType output_type);
void DoLoadBuffer(Node* node, MachineType output_type,
RepresentationChanger* changer);
void DoStoreBuffer(Node* node);
void DoLoadElement(Node* node);
void DoStoreElement(Node* node);
void DoStringAdd(Node* node);
void DoStringEqual(Node* node);
......
......@@ -98,38 +98,6 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
break;
}
case IrOpcode::kLoadElement: {
ElementAccess access = ElementAccessOf(node->op());
if (access.bounds_check == kTypedArrayBoundsCheck) {
NumberMatcher mkey(node->InputAt(1));
NumberMatcher mlength(node->InputAt(2));
if (mkey.HasValue() && mlength.HasValue()) {
// Skip the typed array bounds check if key and length are constant.
if (mkey.Value() >= 0 && mkey.Value() < mlength.Value()) {
access.bounds_check = kNoBoundsCheck;
node->set_op(simplified()->LoadElement(access));
return Changed(node);
}
}
}
break;
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
if (access.bounds_check == kTypedArrayBoundsCheck) {
NumberMatcher mkey(node->InputAt(1));
NumberMatcher mlength(node->InputAt(2));
if (mkey.HasValue() && mlength.HasValue()) {
// Skip the typed array bounds check if key and length are constant.
if (mkey.Value() >= 0 && mkey.Value() < mlength.Value()) {
access.bounds_check = kNoBoundsCheck;
node->set_op(simplified()->StoreElement(access));
return Changed(node);
}
}
}
break;
}
default:
break;
}
......
......@@ -25,6 +25,65 @@ std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
}
// Maps this access's external array type to the machine-level
// representation used for loads/stores of its elements. Uint8Clamped
// arrays are handled separately during lowering and therefore have no
// machine representation here.
MachineType BufferAccess::machine_type() const {
  switch (external_array_type_) {
    // Integer element types, grouped by width.
    case kExternalInt8Array:    return kMachInt8;
    case kExternalUint8Array:   return kMachUint8;
    case kExternalInt16Array:   return kMachInt16;
    case kExternalUint16Array:  return kMachUint16;
    case kExternalInt32Array:   return kMachInt32;
    case kExternalUint32Array:  return kMachUint32;
    // Floating point element types.
    case kExternalFloat32Array: return kMachFloat32;
    case kExternalFloat64Array: return kMachFloat64;
    // No machine type exists for clamped stores.
    case kExternalUint8ClampedArray:
      break;
  }
  UNREACHABLE();
  return kMachNone;
}
// Two BufferAccesses are equal iff they refer to the same external array
// type (the machine type is fully determined by it).
bool operator==(BufferAccess lhs, BufferAccess rhs) {
  return lhs.external_array_type() == rhs.external_array_type();
}

bool operator!=(BufferAccess lhs, BufferAccess rhs) { return !(lhs == rhs); }

// Hash is derived from the external array type only, consistent with
// operator== above.
size_t hash_value(BufferAccess access) {
  return base::hash<ExternalArrayType>()(access.external_array_type());
}
// Prints the human-readable name of the access's external array type,
// e.g. "Int8" for kExternalInt8Array (cases generated via TYPED_ARRAYS).
std::ostream& operator<<(std::ostream& os, BufferAccess access) {
  switch (access.external_array_type()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
  case kExternal##Type##Array:                          \
    return os << #Type;
    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
  }
  UNREACHABLE();
  return os;
}
// Extracts the BufferAccess parameter from a LoadBuffer or StoreBuffer
// operator.
BufferAccess const BufferAccessOf(const Operator* op) {
  DCHECK(op->opcode() == IrOpcode::kLoadBuffer ||
         op->opcode() == IrOpcode::kStoreBuffer);
  return OpParameter<BufferAccess>(op);
}
bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
lhs.machine_type == rhs.machine_type;
......@@ -57,18 +116,6 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
}
std::ostream& operator<<(std::ostream& os, BoundsCheckMode bounds_check_mode) {
switch (bounds_check_mode) {
case kNoBoundsCheck:
return os << "no bounds check";
case kTypedArrayBoundsCheck:
return os << "ignore out of bounds";
}
UNREACHABLE();
return os;
}
bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
return lhs.base_is_tagged == rhs.base_is_tagged &&
lhs.header_size == rhs.header_size &&
......@@ -90,7 +137,7 @@ size_t hash_value(ElementAccess const& access) {
std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", ";
access.type->PrintTo(os);
os << ", " << access.machine_type << ", " << access.bounds_check;
os << ", " << access.machine_type;
return os;
}
......@@ -150,6 +197,26 @@ struct SimplifiedOperatorGlobalCache FINAL {
Name##Operator k##Name;
PURE_OP_LIST(PURE)
#undef PURE
#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
struct LoadBuffer##Type##Operator FINAL : public Operator1<BufferAccess> { \
LoadBuffer##Type##Operator() \
: Operator1<BufferAccess>(IrOpcode::kLoadBuffer, \
Operator::kNoThrow | Operator::kNoWrite, \
"LoadBuffer", 3, 1, 1, 1, 1, 0, \
BufferAccess(kExternal##Type##Array)) {} \
}; \
struct StoreBuffer##Type##Operator FINAL : public Operator1<BufferAccess> { \
StoreBuffer##Type##Operator() \
: Operator1<BufferAccess>(IrOpcode::kStoreBuffer, \
Operator::kNoRead | Operator::kNoThrow, \
"StoreBuffer", 4, 1, 1, 0, 1, 0, \
BufferAccess(kExternal##Type##Array)) {} \
}; \
LoadBuffer##Type##Operator kLoadBuffer##Type; \
StoreBuffer##Type##Operator kStoreBuffer##Type;
TYPED_ARRAYS(BUFFER_ACCESS)
#undef BUFFER_ACCESS
};
......@@ -175,11 +242,37 @@ const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
}
// Returns the statically cached LoadBuffer operator for the given buffer
// access; there is exactly one cached instance per external array type.
const Operator* SimplifiedOperatorBuilder::LoadBuffer(BufferAccess access) {
  switch (access.external_array_type()) {
#define LOAD_BUFFER(Type, type, TYPE, ctype, size) \
  case kExternal##Type##Array:                     \
    return &cache_.kLoadBuffer##Type;
    TYPED_ARRAYS(LOAD_BUFFER)
#undef LOAD_BUFFER
  }
  UNREACHABLE();
  return nullptr;
}
// Returns the statically cached StoreBuffer operator for the given buffer
// access; there is exactly one cached instance per external array type.
const Operator* SimplifiedOperatorBuilder::StoreBuffer(BufferAccess access) {
  switch (access.external_array_type()) {
#define STORE_BUFFER(Type, type, TYPE, ctype, size) \
  case kExternal##Type##Array:                      \
    return &cache_.kStoreBuffer##Type;
    TYPED_ARRAYS(STORE_BUFFER)
#undef STORE_BUFFER
  }
  UNREACHABLE();
  return nullptr;
}
#define ACCESS_OP_LIST(V) \
V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0) \
V(LoadElement, ElementAccess, Operator::kNoWrite, 3, 0, 1) \
V(StoreElement, ElementAccess, Operator::kNoRead, 4, 1, 0)
V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0)
#define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
......
......@@ -33,6 +33,29 @@ enum BaseTaggedness { kUntaggedBase, kTaggedBase };
std::ostream& operator<<(std::ostream&, BaseTaggedness);
// An access descriptor for loads/stores of array buffers. Wraps the
// external array type of the underlying typed array; the corresponding
// machine-level representation is derived from it via machine_type().
class BufferAccess FINAL {
 public:
  explicit BufferAccess(ExternalArrayType external_array_type)
      : external_array_type_(external_array_type) {}

  // The external array type this access refers to.
  ExternalArrayType external_array_type() const { return external_array_type_; }
  // The machine-level representation of the accessed elements.
  MachineType machine_type() const;

 private:
  ExternalArrayType const external_array_type_;
};
bool operator==(BufferAccess, BufferAccess);
bool operator!=(BufferAccess, BufferAccess);
size_t hash_value(BufferAccess);
std::ostream& operator<<(std::ostream&, BufferAccess);
BufferAccess const BufferAccessOf(const Operator* op) WARN_UNUSED_RESULT;
// An access descriptor for loads/stores of fixed structures like field
// accesses of heap objects. Accesses from either tagged or untagged base
// pointers are supported; untagging is done automatically during lowering.
......@@ -53,11 +76,7 @@ size_t hash_value(FieldAccess const&);
std::ostream& operator<<(std::ostream&, FieldAccess const&);
// The bound checking mode for ElementAccess below.
enum BoundsCheckMode { kNoBoundsCheck, kTypedArrayBoundsCheck };
std::ostream& operator<<(std::ostream&, BoundsCheckMode);
FieldAccess const& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
// An access descriptor for loads/stores of indexed structures like characters
......@@ -65,7 +84,6 @@ std::ostream& operator<<(std::ostream&, BoundsCheckMode);
// untagged base pointers are supported; untagging is done automatically during
// lowering.
struct ElementAccess {
BoundsCheckMode bounds_check; // specifies the bounds checking mode.
BaseTaggedness base_is_tagged; // specifies if the base pointer is tagged.
int header_size; // size of the header, without tag.
Type* type; // type of the element.
......@@ -81,13 +99,7 @@ size_t hash_value(ElementAccess const&);
std::ostream& operator<<(std::ostream&, ElementAccess const&);
// If the accessed object is not a heap object, add this to the header_size.
static const int kNonHeapObjectHeaderSize = kHeapObjectTag;
const FieldAccess& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
const ElementAccess& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
// Interface for building simplified operators, which represent the
......@@ -149,8 +161,14 @@ class SimplifiedOperatorBuilder FINAL {
const Operator* ObjectIsSmi();
const Operator* ObjectIsNonNegativeSmi();
const Operator* LoadField(const FieldAccess&);
const Operator* StoreField(const FieldAccess&);
const Operator* LoadField(FieldAccess const&);
const Operator* StoreField(FieldAccess const&);
// load-buffer buffer, offset, length
const Operator* LoadBuffer(BufferAccess);
// store-buffer buffer, offset, length, value
const Operator* StoreBuffer(BufferAccess);
// load-element [base + index], length
const Operator* LoadElement(ElementAccess const&);
......
......@@ -69,6 +69,12 @@ Typer::Typer(Graph* graph, MaybeHandle<Context> context)
integer = Type::Range(minusinfinity, infinity, zone);
weakint = Type::Union(integer, nan_or_minuszero, zone);
signed8_ = Type::Range(f->NewNumber(kMinInt8), f->NewNumber(kMaxInt8), zone);
unsigned8_ = Type::Range(zero, f->NewNumber(kMaxUInt8), zone);
signed16_ =
Type::Range(f->NewNumber(kMinInt16), f->NewNumber(kMaxInt16), zone);
unsigned16_ = Type::Range(zero, f->NewNumber(kMaxUInt16), zone);
number_fun0_ = Type::Function(number, zone);
number_fun1_ = Type::Function(number, number, zone);
number_fun2_ = Type::Function(number, number, number, zone);
......@@ -79,25 +85,15 @@ Typer::Typer(Graph* graph, MaybeHandle<Context> context)
random_fun_ = Type::Function(Type::Union(
Type::UnsignedSmall(), Type::OtherNumber(), zone), zone);
Type* int8 = Type::Intersect(
Type::Range(f->NewNumber(-0x7F), f->NewNumber(0x7F-1), zone),
Type::UntaggedInt8(), zone);
Type* int16 = Type::Intersect(
Type::Range(f->NewNumber(-0x7FFF), f->NewNumber(0x7FFF-1), zone),
Type::UntaggedInt16(), zone);
Type* uint8 = Type::Intersect(
Type::Range(zero, f->NewNumber(0xFF-1), zone),
Type::UntaggedInt8(), zone);
Type* uint16 = Type::Intersect(
Type::Range(zero, f->NewNumber(0xFFFF-1), zone),
Type::UntaggedInt16(), zone);
#define NATIVE_TYPE(sem, rep) \
Type::Intersect(Type::sem(), Type::rep(), zone)
Type* int32 = NATIVE_TYPE(Signed32, UntaggedInt32);
Type* uint32 = NATIVE_TYPE(Unsigned32, UntaggedInt32);
Type* float32 = NATIVE_TYPE(Number, UntaggedFloat32);
Type* float64 = NATIVE_TYPE(Number, UntaggedFloat64);
#define NATIVE_TYPE(sem, rep) Type::Intersect(sem, rep, zone)
Type* int8 = NATIVE_TYPE(signed8_, Type::UntaggedInt8());
Type* uint8 = NATIVE_TYPE(unsigned8_, Type::UntaggedInt8());
Type* int16 = NATIVE_TYPE(signed16_, Type::UntaggedInt16());
Type* uint16 = NATIVE_TYPE(unsigned16_, Type::UntaggedInt16());
Type* int32 = NATIVE_TYPE(Type::Signed32(), Type::UntaggedInt32());
Type* uint32 = NATIVE_TYPE(Type::Unsigned32(), Type::UntaggedInt32());
Type* float32 = NATIVE_TYPE(Type::Number(), Type::UntaggedFloat32());
Type* float64 = NATIVE_TYPE(Type::Number(), Type::UntaggedFloat64());
#undef NATIVE_TYPE
Type* buffer = Type::Buffer(zone);
......@@ -1520,6 +1516,31 @@ Bounds Typer::Visitor::TypeLoadField(Node* node) {
}
Bounds Typer::Visitor::TypeLoadBuffer(Node* node) {
switch (BufferAccessOf(node->op()).external_array_type()) {
case kExternalInt8Array:
return Bounds(typer_->signed8_);
case kExternalUint8Array:
return Bounds(typer_->unsigned8_);
case kExternalInt16Array:
return Bounds(typer_->signed16_);
case kExternalUint16Array:
return Bounds(typer_->unsigned16_);
case kExternalInt32Array:
return Bounds(Type::Signed32());
case kExternalUint32Array:
return Bounds(Type::Unsigned32());
case kExternalFloat32Array:
case kExternalFloat64Array:
return Bounds(Type::Number());
case kExternalUint8ClampedArray:
break;
}
UNREACHABLE();
return Bounds();
}
Bounds Typer::Visitor::TypeLoadElement(Node* node) {
return Bounds(ElementAccessOf(node->op()).type);
}
......@@ -1531,6 +1552,12 @@ Bounds Typer::Visitor::TypeStoreField(Node* node) {
}
Bounds Typer::Visitor::TypeStoreBuffer(Node* node) {
UNREACHABLE();
return Bounds();
}
Bounds Typer::Visitor::TypeStoreElement(Node* node) {
UNREACHABLE();
return Bounds();
......@@ -1897,6 +1924,17 @@ Bounds Typer::Visitor::TypeLoadStackPointer(Node* node) {
}
Bounds Typer::Visitor::TypeCheckedLoad(Node* node) {
return Bounds::Unbounded(zone());
}
Bounds Typer::Visitor::TypeCheckedStore(Node* node) {
UNREACHABLE();
return Bounds();
}
// Heap constants.
......@@ -1977,6 +2015,6 @@ Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
return Type::Constant(value, zone());
}
}
}
} // namespace v8::internal::compiler
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -52,6 +52,10 @@ class Typer {
Type* falsish;
Type* integer;
Type* weakint;
Type* signed8_;
Type* unsigned8_;
Type* signed16_;
Type* unsigned16_;
Type* number_fun0_;
Type* number_fun1_;
Type* number_fun2_;
......@@ -73,8 +77,9 @@ class Typer {
ZoneVector<Handle<Object> > weaken_max_limits_;
DISALLOW_COPY_AND_ASSIGN(Typer);
};
}
}
} // namespace v8::internal::compiler
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_TYPER_H_
......@@ -633,6 +633,8 @@ void Verifier::Visitor::Pre(Node* node) {
// CheckValueInputIs(node, 0, Type::Object());
// CheckUpperIs(node, Field(node).type));
break;
case IrOpcode::kLoadBuffer:
break;
case IrOpcode::kLoadElement:
// Object -> elementtype
// TODO(rossberg): activate once machine ops are typed.
......@@ -646,6 +648,8 @@ void Verifier::Visitor::Pre(Node* node) {
// CheckValueInputIs(node, 1, Field(node).type));
CheckNotTyped(node);
break;
case IrOpcode::kStoreBuffer:
break;
case IrOpcode::kStoreElement:
// (Object, elementtype) -> _|_
// TODO(rossberg): activate once machine ops are typed.
......@@ -723,6 +727,8 @@ void Verifier::Visitor::Pre(Node* node) {
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
case IrOpcode::kLoadStackPointer:
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
// TODO(rossberg): Check.
break;
}
......
......@@ -119,18 +119,45 @@ class X64OperandConverter : public InstructionOperandConverter {
return Operand(no_reg, 0);
}
Operand MemoryOperand() {
int first_input = 0;
Operand MemoryOperand(int first_input = 0) {
return MemoryOperand(&first_input);
}
};
static bool HasImmediateInput(Instruction* instr, int index) {
namespace {
bool HasImmediateInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsImmediate();
}
class OutOfLineLoadInteger FINAL : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL { __ xorl(result_, result_); }
private:
Register const result_;
};
class OutOfLineLoadFloat FINAL : public OutOfLineCode {
public:
OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL { __ pcmpeqd(result_, result_); }
private:
XMMRegister const result_;
};
} // namespace
#define ASSEMBLE_UNOP(asm_instr) \
do { \
if (instr->Output()->IsRegister()) { \
......@@ -220,6 +247,72 @@ static bool HasImmediateInput(Instruction* instr, int index) {
} while (0)
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
do { \
auto result = i.OutputDoubleRegister(); \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmpl(offset, i.InputRegister(1)); \
} else { \
__ cmpl(offset, i.InputImmediate(1)); \
} \
OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
__ j(above_equal, ool->entry()); \
__ asm_instr(result, i.MemoryOperand(2)); \
__ bind(ool->exit()); \
} while (false)
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmpl(offset, i.InputRegister(1)); \
} else { \
__ cmpl(offset, i.InputImmediate(1)); \
} \
OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
__ j(above_equal, ool->entry()); \
__ asm_instr(result, i.MemoryOperand(2)); \
__ bind(ool->exit()); \
} while (false)
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmpl(offset, i.InputRegister(1)); \
} else { \
__ cmpl(offset, i.InputImmediate(1)); \
} \
Label done; \
__ j(above_equal, &done, Label::kNear); \
__ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
__ bind(&done); \
} while (false)
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmpl(offset, i.InputRegister(1)); \
} else { \
__ cmpl(offset, i.InputImmediate(1)); \
} \
Label done; \
__ j(above_equal, &done, Label::kNear); \
if (instr->InputAt(2)->IsRegister()) { \
__ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
} else { \
__ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
} \
__ bind(&done); \
} while (false)
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X64OperandConverter i(this, instr);
......@@ -667,6 +760,42 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ RecordWrite(object, index, value, mode);
break;
}
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
case kCheckedLoadUint8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
break;
case kCheckedLoadInt16:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
break;
case kCheckedLoadUint16:
ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
break;
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(movb);
break;
case kCheckedStoreWord16:
ASSEMBLE_CHECKED_STORE_INTEGER(movw);
break;
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(movl);
break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(movss);
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
break;
}
}
......
......@@ -72,14 +72,14 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
if (g.CanBeImmediate(base)) {
// load [#base + %index]
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
} else if (g.CanBeImmediate(index)) {
if (g.CanBeImmediate(index)) {
// load [%base + #index]
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else if (g.CanBeImmediate(base)) {
// load [#base + %index]
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
} else {
// load [%base + %index*1]
Emit(opcode | AddressingModeField::encode(kMode_MR1),
......@@ -136,14 +136,14 @@ void InstructionSelector::VisitStore(Node* node) {
}
InstructionOperand* value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
if (g.CanBeImmediate(base)) {
// store [#base + %index], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
g.UseRegister(index), g.UseImmediate(base), value_operand);
} else if (g.CanBeImmediate(index)) {
if (g.CanBeImmediate(index)) {
// store [%base + #index], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
g.UseRegister(base), g.UseImmediate(index), value_operand);
} else if (g.CanBeImmediate(base)) {
// store [#base + %index], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
g.UseRegister(index), g.UseImmediate(base), value_operand);
} else {
// store [%base + %index*1], %|#value
Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
......@@ -152,6 +152,110 @@ void InstructionSelector::VisitStore(Node* node) {
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
X64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
case kRepWord16:
opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
case kRepFloat64:
opcode = kCheckedLoadFloat64;
break;
default:
UNREACHABLE();
return;
}
if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
Int32Matcher mlength(length);
Int32BinopMatcher moffset(offset);
if (mlength.HasValue() && moffset.right().HasValue() &&
mlength.Value() > moffset.right().Value()) {
InstructionOperand* offset_operand = g.UseRegister(moffset.left().node());
InstructionOperand* length_operand =
g.TempImmediate(mlength.Value() - moffset.right().Value());
Emit(opcode | AddressingModeField::encode(kMode_MR1I),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer), offset_operand,
g.UseImmediate(moffset.right().node()));
return;
}
}
InstructionOperand* offset_operand = g.UseRegister(offset);
InstructionOperand* length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_MR1),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer), offset_operand);
}
void InstructionSelector::VisitCheckedStore(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
X64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = kCheckedStoreWord8;
break;
case kRepWord16:
opcode = kCheckedStoreWord16;
break;
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
case kRepFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
UNREACHABLE();
return;
}
InstructionOperand* value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
Int32Matcher mlength(length);
Int32BinopMatcher moffset(offset);
if (mlength.HasValue() && moffset.right().HasValue() &&
mlength.Value() > moffset.right().Value()) {
InstructionOperand* offset_operand = g.UseRegister(moffset.left().node());
InstructionOperand* length_operand =
g.TempImmediate(mlength.Value() - moffset.right().Value());
Emit(opcode | AddressingModeField::encode(kMode_MR1I), nullptr,
offset_operand, length_operand, value_operand, g.UseRegister(buffer),
offset_operand, g.UseImmediate(moffset.right().node()));
return;
}
}
InstructionOperand* offset_operand = g.UseRegister(offset);
InstructionOperand* length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr, offset_operand,
length_operand, value_operand, g.UseRegister(buffer), offset_operand);
}
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
......
......@@ -457,11 +457,11 @@ void Assembler::mov_b(Register dst, const Operand& src) {
}
void Assembler::mov_b(const Operand& dst, int8_t imm8) {
void Assembler::mov_b(const Operand& dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0xC6);
emit_operand(eax, dst);
EMIT(imm8);
EMIT(static_cast<int8_t>(src.x_));
}
......@@ -489,13 +489,13 @@ void Assembler::mov_w(const Operand& dst, Register src) {
}
void Assembler::mov_w(const Operand& dst, int16_t imm16) {
void Assembler::mov_w(const Operand& dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0xC7);
emit_operand(eax, dst);
EMIT(static_cast<int8_t>(imm16 & 0xff));
EMIT(static_cast<int8_t>(imm16 >> 8));
EMIT(static_cast<int8_t>(src.x_ & 0xff));
EMIT(static_cast<int8_t>(src.x_ >> 8));
}
......
......@@ -619,12 +619,14 @@ class Assembler : public AssemblerBase {
void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
void mov_b(Register dst, const Operand& src);
void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
void mov_b(const Operand& dst, int8_t imm8);
void mov_b(const Operand& dst, int8_t src) { mov_b(dst, Immediate(src)); }
void mov_b(const Operand& dst, const Immediate& src);
void mov_b(const Operand& dst, Register src);
void mov_w(Register dst, const Operand& src);
void mov_w(const Operand& dst, int16_t src) { mov_w(dst, Immediate(src)); }
void mov_w(const Operand& dst, const Immediate& src);
void mov_w(const Operand& dst, Register src);
void mov_w(const Operand& dst, int16_t imm16);
void mov(Register dst, int32_t imm32);
void mov(Register dst, const Immediate& x);
......
......@@ -127,14 +127,12 @@ class SimplifiedGraphBuilder : public GraphBuilder {
Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
return NewNode(simplified()->StoreField(access), object, value);
}
Node* LoadElement(const ElementAccess& access, Node* object, Node* index,
Node* length) {
return NewNode(simplified()->LoadElement(access), object, index, length);
Node* LoadElement(const ElementAccess& access, Node* object, Node* index) {
return NewNode(simplified()->LoadElement(access), object, index);
}
Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
Node* length, Node* value) {
return NewNode(simplified()->StoreElement(access), object, index, length,
value);
Node* value) {
return NewNode(simplified()->StoreElement(access), object, index, value);
}
protected:
......
......@@ -232,10 +232,8 @@ TEST(RunLoadStoreMap) {
TEST(RunLoadStoreFixedArrayIndex) {
SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
ElementAccess access = AccessBuilder::ForFixedArrayElement();
Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0),
t.Int32Constant(2));
t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), t.Int32Constant(2),
load);
Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
t.Return(load);
t.LowerAllNodes();
......@@ -264,10 +262,9 @@ TEST(RunLoadStoreArrayBuffer) {
Node* backing_store = t.LoadField(
AccessBuilder::ForJSArrayBufferBackingStore(), t.Parameter(0));
Node* load =
t.LoadElement(buffer_access, backing_store, t.Int32Constant(index),
t.Int32Constant(array_length));
t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
t.Int32Constant(array_length), load);
load);
t.Return(t.jsgraph.TrueConstant());
t.LowerAllNodes();
......@@ -350,13 +347,12 @@ TEST(RunLoadElementFromUntaggedBase) {
for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
ElementAccess access = {kNoBoundsCheck, kUntaggedBase, offset,
Type::Integral32(), kMachAnyTagged};
ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
kMachAnyTagged};
SimplifiedLoweringTester<Object*> t;
Node* load = t.LoadElement(
access, t.PointerConstant(smis), t.Int32Constant(static_cast<int>(j)),
t.Int32Constant(static_cast<int>(arraysize(smis))));
Node* load = t.LoadElement(access, t.PointerConstant(smis),
t.Int32Constant(static_cast<int>(j)));
t.Return(load);
t.LowerAllNodes();
......@@ -379,14 +375,13 @@ TEST(RunStoreElementFromUntaggedBase) {
for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
ElementAccess access = {kNoBoundsCheck, kUntaggedBase, offset,
Type::Integral32(), kMachAnyTagged};
ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
kMachAnyTagged};
SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
Node* p0 = t.Parameter(0);
t.StoreElement(access, t.PointerConstant(smis),
t.Int32Constant(static_cast<int>(j)),
t.Int32Constant(static_cast<int>(arraysize(smis))), p0);
t.Int32Constant(static_cast<int>(j)), p0);
t.Return(p0);
t.LowerAllNodes();
......@@ -452,10 +447,8 @@ class AccessTester : public HandleAndZoneScope {
SimplifiedLoweringTester<Object*> t;
Node* ptr = GetBaseNode(&t);
Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index),
t.Int32Constant(static_cast<int>(num_elements)));
t.StoreElement(access, ptr, t.Int32Constant(to_index),
t.Int32Constant(static_cast<int>(num_elements)), load);
Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index));
t.StoreElement(access, ptr, t.Int32Constant(to_index), load);
t.Return(t.jsgraph.TrueConstant());
t.LowerAllNodes();
t.GenerateCode();
......@@ -542,9 +535,9 @@ class AccessTester : public HandleAndZoneScope {
private:
ElementAccess GetElementAccess() {
ElementAccess access = {
kNoBoundsCheck, tagged ? kTaggedBase : kUntaggedBase,
tagged ? FixedArrayBase::kHeaderSize : 0, Type::Any(), rep};
ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
tagged ? FixedArrayBase::kHeaderSize : 0,
Type::Any(), rep};
return access;
}
......@@ -1463,13 +1456,11 @@ TEST(LowerLoadElement_to_load) {
TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
ElementAccess access = {kNoBoundsCheck, kTaggedBase,
FixedArrayBase::kHeaderSize, Type::Any(),
kMachineReps[i]};
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Type::Any(), kMachineReps[i]};
Node* load =
t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0, t.p1,
t.jsgraph.Int32Constant(1024), t.start, t.start);
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.start, t.start);
Node* use = t.Use(load, kMachineReps[i]);
t.Return(use);
t.Lower();
......@@ -1487,14 +1478,12 @@ TEST(LowerStoreElement_to_store) {
TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
ElementAccess access = {kNoBoundsCheck, kTaggedBase,
FixedArrayBase::kHeaderSize, Type::Any(),
kMachineReps[i]};
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Type::Any(), kMachineReps[i]};
Node* val = t.ExampleWithOutput(kMachineReps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
t.p1, t.jsgraph.Int32Constant(1024), val,
t.start, t.start);
t.p1, val, t.start, t.start);
t.Effect(store);
t.Lower();
CHECK_EQ(IrOpcode::kStore, store->opcode());
......@@ -1513,13 +1502,12 @@ TEST(LowerStoreElement_to_store) {
TEST(InsertChangeForLoadElementIndex) {
// LoadElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length) =>
// Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
ElementAccess access = {kNoBoundsCheck, kTaggedBase,
FixedArrayBase::kHeaderSize, Type::Any(),
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
kMachAnyTagged};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.p2, t.start, t.start);
t.p1, t.start, t.start);
t.Return(load);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
......@@ -1533,13 +1521,12 @@ TEST(InsertChangeForLoadElementIndex) {
TEST(InsertChangeForStoreElementIndex) {
// StoreElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length, val) =>
// Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
ElementAccess access = {kNoBoundsCheck, kTaggedBase,
FixedArrayBase::kHeaderSize, Type::Any(),
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
kMachAnyTagged};
Node* store =
t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1, t.p2,
t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
t.jsgraph.TrueConstant(), t.start, t.start);
t.Effect(store);
t.Lower();
......@@ -1554,12 +1541,11 @@ TEST(InsertChangeForStoreElementIndex) {
TEST(InsertChangeForLoadElement) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
ElementAccess access = {kNoBoundsCheck, kTaggedBase,
FixedArrayBase::kHeaderSize, Type::Any(),
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
kMachFloat64};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.p1, t.start, t.start);
t.p1, t.start, t.start);
t.Return(load);
t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
......@@ -1586,14 +1572,13 @@ TEST(InsertChangeForLoadField) {
TEST(InsertChangeForStoreElement) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
ElementAccess access = {kNoBoundsCheck, kTaggedBase,
FixedArrayBase::kHeaderSize, Type::Any(),
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
kMachFloat64};
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
t.jsgraph.Int32Constant(0), t.p2, t.p1,
t.start, t.start);
Node* store =
t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
t.jsgraph.Int32Constant(0), t.p1, t.start, t.start);
t.Effect(store);
t.Lower();
......
......@@ -20,10 +20,9 @@ namespace compiler {
namespace {
const ExternalArrayType kExternalArrayTypes[] = {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) kExternal##Type##Array,
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
kExternalUint8Array, kExternalInt8Array, kExternalUint16Array,
kExternalInt16Array, kExternalUint32Array, kExternalInt32Array,
kExternalFloat32Array, kExternalFloat64Array};
Type* const kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
......@@ -244,8 +243,11 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
int const element_size = static_cast<int>(array->element_size());
Node* key = Parameter(Type::Integral32());
Node* key = Parameter(
Type::Range(factory()->NewNumber(kMinInt / element_size),
factory()->NewNumber(kMaxInt / element_size), zone()));
Node* base = HeapConstant(array);
Node* context = UndefinedConstant();
Node* effect = graph()->start();
......@@ -259,12 +261,59 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
node->AppendInput(zone(), control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
element_size == 1
? key
: IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsLoadElement(
AccessBuilder::ForTypedArrayElement(type, true),
EXPECT_THAT(
r.replacement(),
IsLoadBuffer(BufferAccess(type),
IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
key, IsNumberConstant(array->length()->Number()), effect));
offset_matcher,
IsNumberConstant(array->byte_length()->Number()), effect,
control));
}
}
TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArrayWithSafeKey) {
const size_t kLength = 17;
double backing_store[kLength];
Handle<JSArrayBuffer> buffer =
NewArrayBuffer(backing_store, sizeof(backing_store));
VectorSlotPair feedback(Handle<TypeFeedbackVector>::null(),
FeedbackVectorICSlot::Invalid());
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
int min = random_number_generator()->NextInt(static_cast<int>(kLength));
int max = random_number_generator()->NextInt(static_cast<int>(kLength));
if (min > max) std::swap(min, max);
Node* key = Parameter(Type::Range(factory()->NewNumber(min),
factory()->NewNumber(max), zone()));
Node* base = HeapConstant(array);
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
Node* node = graph()->NewNode(javascript()->LoadProperty(feedback), base,
key, context);
if (FLAG_turbo_deoptimization) {
node->AppendInput(zone(), UndefinedConstant());
}
node->AppendInput(zone(), effect);
node->AppendInput(zone(), control);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
IsLoadElement(access,
IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
key, effect, control));
}
}
......@@ -282,8 +331,11 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
int const element_size = static_cast<int>(array->element_size());
Node* key = Parameter(Type::Integral32());
Node* key = Parameter(
Type::Range(factory()->NewNumber(kMinInt / element_size),
factory()->NewNumber(kMaxInt / element_size), zone()));
Node* base = HeapConstant(array);
Node* value =
Parameter(AccessBuilder::ForTypedArrayElement(type, true).type);
......@@ -299,12 +351,18 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
node->AppendInput(zone(), control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
element_size == 1
? key
: IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsStoreElement(
AccessBuilder::ForTypedArrayElement(type, true),
EXPECT_THAT(
r.replacement(),
IsStoreBuffer(BufferAccess(type),
IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
key, IsNumberConstant(array->length()->Number()), value,
offset_matcher,
IsNumberConstant(array->byte_length()->Number()), value,
effect, control));
}
}
......@@ -320,8 +378,11 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
int const element_size = static_cast<int>(array->element_size());
Node* key = Parameter(Type::Integral32());
Node* key = Parameter(
Type::Range(factory()->NewNumber(kMinInt / element_size),
factory()->NewNumber(kMaxInt / element_size), zone()));
Node* base = HeapConstant(array);
Node* value = Parameter(Type::Any());
Node* context = UndefinedConstant();
......@@ -336,6 +397,11 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
node->AppendInput(zone(), control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
element_size == 1
? key
: IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
Matcher<Node*> value_matcher =
IsToNumber(value, context, effect, control);
Matcher<Node*> effect_matcher = value_matcher;
......@@ -348,16 +414,58 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
}
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsStoreElement(
AccessBuilder::ForTypedArrayElement(type, true),
EXPECT_THAT(
r.replacement(),
IsStoreBuffer(BufferAccess(type),
IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
key, IsNumberConstant(array->length()->Number()),
offset_matcher,
IsNumberConstant(array->byte_length()->Number()),
value_matcher, effect_matcher, control));
}
}
}
TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
const size_t kLength = 17;
double backing_store[kLength];
Handle<JSArrayBuffer> buffer =
NewArrayBuffer(backing_store, sizeof(backing_store));
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
int min = random_number_generator()->NextInt(static_cast<int>(kLength));
int max = random_number_generator()->NextInt(static_cast<int>(kLength));
if (min > max) std::swap(min, max);
Node* key = Parameter(Type::Range(factory()->NewNumber(min),
factory()->NewNumber(max), zone()));
Node* base = HeapConstant(array);
Node* value = Parameter(access.type);
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
base, key, value, context);
if (FLAG_turbo_deoptimization) {
node->AppendInput(zone(), UndefinedConstant());
}
node->AppendInput(zone(), effect);
node->AppendInput(zone(), control);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
IsStoreElement(
access, IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
key, value, effect, control));
}
}
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -512,7 +512,9 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithWord32AndWithConstant) {
graph()->NewNode(machine()->Word32And(), p0, Int32Constant(k)),
Int32Constant(l)));
ASSERT_TRUE(r1.Changed());
EXPECT_THAT(r1.replacement(), IsWord32And(p0, IsInt32Constant(k & l)));
EXPECT_THAT(r1.replacement(),
(k & l) ? IsWord32And(p0, IsInt32Constant(k & l))
: IsInt32Constant(0));
// (K & x) & L => x & (K & L)
Reduction const r2 = Reduce(graph()->NewNode(
......@@ -520,7 +522,9 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithWord32AndWithConstant) {
graph()->NewNode(machine()->Word32And(), Int32Constant(k), p0),
Int32Constant(l)));
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(), IsWord32And(p0, IsInt32Constant(k & l)));
EXPECT_THAT(r2.replacement(),
(k & l) ? IsWord32And(p0, IsInt32Constant(k & l))
: IsInt32Constant(0));
}
}
}
......@@ -740,6 +744,28 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Sar) {
}
TEST_F(MachineOperatorReducerTest,
Word32ShlWithWord32SarAndInt32AddAndConstant) {
Node* const p0 = Parameter(0);
TRACED_FOREACH(int32_t, k, kInt32Values) {
TRACED_FORRANGE(int32_t, l, 1, 31) {
// (x + (K << L)) >> L << L => (x & (-1 << L)) + (K << L)
Reduction const r = Reduce(graph()->NewNode(
machine()->Word32Shl(),
graph()->NewNode(machine()->Word32Sar(),
graph()->NewNode(machine()->Int32Add(), p0,
Int32Constant(k << l)),
Int32Constant(l)),
Int32Constant(l)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)),
IsInt32Constant(k << l)));
}
}
}
TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
Node* p0 = Parameter(0);
TRACED_FORRANGE(int32_t, x, 1, 31) {
......
This diff is collapsed.
......@@ -21,6 +21,7 @@ class Unique;
namespace compiler {
// Forward declarations.
class BufferAccess;
class CallDescriptor;
struct ElementAccess;
struct FieldAccess;
......@@ -87,15 +88,27 @@ Matcher<Node*> IsLoadField(const Matcher<FieldAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsLoadBuffer(const Matcher<BufferAccess>& access_matcher,
const Matcher<Node*>& buffer_matcher,
const Matcher<Node*>& offset_matcher,
const Matcher<Node*>& length_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStoreBuffer(const Matcher<BufferAccess>& access_matcher,
const Matcher<Node*>& buffer_matcher,
const Matcher<Node*>& offset_matcher,
const Matcher<Node*>& length_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsLoadElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& length_matcher,
const Matcher<Node*>& control_matcher,
const Matcher<Node*>& effect_matcher);
Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& length_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
......
......@@ -478,126 +478,6 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
}
}
// -----------------------------------------------------------------------------
// LoadElement
// Checks bounds check elimination on LoadElement: when both key and length
// are constants and the key is provably a valid index, the reducer swaps the
// checked access for an unchecked one; otherwise the node is left untouched.
TEST_F(SimplifiedOperatorReducerTest, LoadElementWithConstantKeyAndLength) {
  ElementAccess const checked_access = {kTypedArrayBoundsCheck, kUntaggedBase,
                                        0, Type::Any(), kMachAnyTagged};
  ElementAccess unchecked_access = checked_access;
  unchecked_access.bounds_check = kNoBoundsCheck;
  Node* const object = Parameter(0);
  Node* const effect = graph()->start();
  {
    // Negative key: cannot be proven in bounds, so no reduction.
    Node* const index = NumberConstant(-42.0);
    Node* const limit = NumberConstant(100.0);
    Reduction const reduction =
        Reduce(graph()->NewNode(simplified()->LoadElement(checked_access),
                                object, index, limit, effect));
    ASSERT_FALSE(reduction.Changed());
  }
  {
    // Minus zero counts as a valid zero index below the length.
    Node* const index = NumberConstant(-0.0);
    Node* const limit = NumberConstant(1.0);
    Reduction const reduction =
        Reduce(graph()->NewNode(simplified()->LoadElement(checked_access),
                                object, index, limit, effect));
    ASSERT_TRUE(reduction.Changed());
    EXPECT_THAT(reduction.replacement(),
                IsLoadElement(unchecked_access, object, index, limit, effect));
  }
  {
    // Zero key against length one is trivially in bounds.
    Node* const index = NumberConstant(0);
    Node* const limit = NumberConstant(1);
    Reduction const reduction =
        Reduce(graph()->NewNode(simplified()->LoadElement(checked_access),
                                object, index, limit, effect));
    ASSERT_TRUE(reduction.Changed());
    EXPECT_THAT(reduction.replacement(),
                IsLoadElement(unchecked_access, object, index, limit, effect));
  }
  {
    // Non-integer key well below the length: check is still removed.
    Node* const index = NumberConstant(42.2);
    Node* const limit = NumberConstant(128);
    Reduction const reduction =
        Reduce(graph()->NewNode(simplified()->LoadElement(checked_access),
                                object, index, limit, effect));
    ASSERT_TRUE(reduction.Changed());
    EXPECT_THAT(reduction.replacement(),
                IsLoadElement(unchecked_access, object, index, limit, effect));
  }
  {
    // Non-integer key beyond the length: no reduction.
    Node* const index = NumberConstant(39.2);
    Node* const limit = NumberConstant(32.0);
    Reduction const reduction =
        Reduce(graph()->NewNode(simplified()->LoadElement(checked_access),
                                object, index, limit, effect));
    ASSERT_FALSE(reduction.Changed());
  }
}
// -----------------------------------------------------------------------------
// StoreElement
// Mirror of the LoadElement test above for StoreElement: constant key/length
// pairs with the key provably in bounds lose their bounds check; everything
// else is left as-is.
TEST_F(SimplifiedOperatorReducerTest, StoreElementWithConstantKeyAndLength) {
  ElementAccess const checked_access = {kTypedArrayBoundsCheck, kUntaggedBase,
                                        0, Type::Any(), kMachAnyTagged};
  ElementAccess unchecked_access = checked_access;
  unchecked_access.bounds_check = kNoBoundsCheck;
  Node* const object = Parameter(0);
  Node* const input = Parameter(1);
  Node* const effect = graph()->start();
  Node* const control = graph()->start();
  {
    // Negative key against an empty array: no reduction.
    Node* const index = NumberConstant(-72.1);
    Node* const limit = NumberConstant(0.0);
    Reduction const reduction = Reduce(
        graph()->NewNode(simplified()->StoreElement(checked_access), object,
                         index, limit, input, effect, control));
    ASSERT_FALSE(reduction.Changed());
  }
  {
    // Minus zero counts as a valid zero index below the length.
    Node* const index = NumberConstant(-0.0);
    Node* const limit = NumberConstant(999);
    Reduction const reduction = Reduce(
        graph()->NewNode(simplified()->StoreElement(checked_access), object,
                         index, limit, input, effect, control));
    ASSERT_TRUE(reduction.Changed());
    EXPECT_THAT(reduction.replacement(),
                IsStoreElement(unchecked_access, object, index, limit, input,
                               effect, control));
  }
  {
    // Zero key against length one is trivially in bounds.
    Node* const index = NumberConstant(0);
    Node* const limit = NumberConstant(1);
    Reduction const reduction = Reduce(
        graph()->NewNode(simplified()->StoreElement(checked_access), object,
                         index, limit, input, effect, control));
    ASSERT_TRUE(reduction.Changed());
    EXPECT_THAT(reduction.replacement(),
                IsStoreElement(unchecked_access, object, index, limit, input,
                               effect, control));
  }
  {
    // Non-integer key well below the length: check is still removed.
    Node* const index = NumberConstant(42.2);
    Node* const limit = NumberConstant(128);
    Reduction const reduction = Reduce(
        graph()->NewNode(simplified()->StoreElement(checked_access), object,
                         index, limit, input, effect, control));
    ASSERT_TRUE(reduction.Changed());
    EXPECT_THAT(reduction.replacement(),
                IsStoreElement(unchecked_access, object, index, limit, input,
                               effect, control));
  }
  {
    // Non-integer key beyond the length: no reduction.
    Node* const index = NumberConstant(39.2);
    Node* const limit = NumberConstant(32.0);
    Reduction const reduction = Reduce(
        graph()->NewNode(simplified()->StoreElement(checked_access), object,
                         index, limit, input, effect, control));
    ASSERT_FALSE(reduction.Changed());
  }
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -113,50 +113,117 @@ INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest, SimplifiedPureOperatorTest,
::testing::ValuesIn(kPureOperators));
// -----------------------------------------------------------------------------
// Buffer access operators.
namespace {

// Every external array type exercised by the parameterized buffer access
// operator tests below.
const ExternalArrayType kExternalArrayTypes[] = {
    kExternalUint8Array,
    kExternalInt8Array,
    kExternalUint16Array,
    kExternalInt16Array,
    kExternalUint32Array,
    kExternalInt32Array,
    kExternalFloat32Array,
    kExternalFloat64Array};

}  // namespace
// Fixture parameterized over ExternalArrayType, so every buffer access
// operator test runs once per element type (see kExternalArrayTypes).
class SimplifiedBufferAccessOperatorTest
    : public TestWithZone,
      public ::testing::WithParamInterface<ExternalArrayType> {};
// Requesting operators for the same BufferAccess from two independent
// builders must yield the exact same (globally cached) operator instances.
TEST_P(SimplifiedBufferAccessOperatorTest, InstancesAreGloballyShared) {
  BufferAccess const access(GetParam());
  SimplifiedOperatorBuilder first_builder(zone());
  SimplifiedOperatorBuilder second_builder(zone());
  EXPECT_EQ(first_builder.LoadBuffer(access),
            second_builder.LoadBuffer(access));
  EXPECT_EQ(first_builder.StoreBuffer(access),
            second_builder.StoreBuffer(access));
}
// Checks the static shape of the LoadBuffer operator: opcode, properties,
// parameter round-trip through BufferAccessOf, and input/output counts.
TEST_P(SimplifiedBufferAccessOperatorTest, LoadBuffer) {
  SimplifiedOperatorBuilder simplified(zone());
  BufferAccess const access(GetParam());
  const Operator* load = simplified.LoadBuffer(access);
  EXPECT_EQ(IrOpcode::kLoadBuffer, load->opcode());
  EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, load->properties());
  EXPECT_EQ(access, BufferAccessOf(load));
  // Three value inputs plus one effect and one control input.
  EXPECT_EQ(3, load->ValueInputCount());
  EXPECT_EQ(1, load->EffectInputCount());
  EXPECT_EQ(1, load->ControlInputCount());
  EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(load));
  // Produces a value and an effect, but no control output.
  EXPECT_EQ(1, load->ValueOutputCount());
  EXPECT_EQ(1, load->EffectOutputCount());
  EXPECT_EQ(0, load->ControlOutputCount());
}
// Checks the static shape of the StoreBuffer operator: opcode, properties,
// parameter round-trip through BufferAccessOf, and input/output counts.
TEST_P(SimplifiedBufferAccessOperatorTest, StoreBuffer) {
  SimplifiedOperatorBuilder simplified(zone());
  BufferAccess const access(GetParam());
  const Operator* store = simplified.StoreBuffer(access);
  EXPECT_EQ(IrOpcode::kStoreBuffer, store->opcode());
  EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, store->properties());
  EXPECT_EQ(access, BufferAccessOf(store));
  // Four value inputs plus one effect and one control input.
  EXPECT_EQ(4, store->ValueInputCount());
  EXPECT_EQ(1, store->EffectInputCount());
  EXPECT_EQ(1, store->ControlInputCount());
  EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(store));
  // Produces only an effect: no value and no control output.
  EXPECT_EQ(0, store->ValueOutputCount());
  EXPECT_EQ(1, store->EffectOutputCount());
  EXPECT_EQ(0, store->ControlOutputCount());
}
INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
SimplifiedBufferAccessOperatorTest,
::testing::ValuesIn(kExternalArrayTypes));
// -----------------------------------------------------------------------------
// Element access operators.
namespace {

// ElementAccess configurations fed to the parameterized element access
// operator tests: tagged FixedArray elements, raw untagged buffers, and
// typed array backing stores (external and on-heap).
//
// NOTE(review): this initializer appears to contain TWO concatenated
// versions of the array -- the entries carrying an explicit bounds-check
// field are terminated by "}};" at the kRepFloat64 entry below, yet further
// initializers (without the bounds-check field) follow. This looks like
// diff residue where removed and added hunk lines were merged without
// markers; as written it does not parse. Reconcile against the upstream
// revision before building.
const ElementAccess kElementAccesses[] = {
    {kNoBoundsCheck, kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
     kMachAnyTagged},
    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
     Type::Any(), kMachInt8},
    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
     Type::Any(), kMachInt16},
    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
     Type::Any(), kMachInt32},
    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
     Type::Any(), kMachUint8},
    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
     Type::Any(), kMachUint16},
    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
     Type::Any(), kMachUint32},
    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt8},
    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt16},
    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt32},
    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Number(), kRepFloat32},
    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Number(), kRepFloat64},
    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
     Type::Signed32(), kMachInt8},
    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
     Type::Unsigned32(), kMachUint8},
    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
     Type::Signed32(), kMachInt16},
    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
     Type::Unsigned32(), kMachUint16},
    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
     Type::Signed32(), kMachInt32},
    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
     Type::Unsigned32(), kMachUint32},
    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
     Type::Number(), kRepFloat32},
    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
     Type::Number(), kRepFloat64}};
    {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged},
    {kUntaggedBase, 0, Type::Any(), kMachInt8},
    {kUntaggedBase, 0, Type::Any(), kMachInt16},
    {kUntaggedBase, 0, Type::Any(), kMachInt32},
    {kUntaggedBase, 0, Type::Any(), kMachUint8},
    {kUntaggedBase, 0, Type::Any(), kMachUint16},
    {kUntaggedBase, 0, Type::Any(), kMachUint32},
    {kUntaggedBase, 0, Type::Signed32(), kMachInt8},
    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
    {kUntaggedBase, 0, Type::Signed32(), kMachInt16},
    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
    {kUntaggedBase, 0, Type::Signed32(), kMachInt32},
    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
    {kUntaggedBase, 0, Type::Number(), kRepFloat32},
    {kUntaggedBase, 0, Type::Number(), kRepFloat64},
    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
     kMachInt8},
    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
     kMachUint8},
    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
     kMachInt16},
    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
     kMachUint16},
    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
     kMachInt32},
    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
     kMachUint32},
    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
     kRepFloat32},
    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
     kRepFloat64}};

}  // namespace
......@@ -175,9 +242,9 @@ TEST_P(SimplifiedElementAccessOperatorTest, LoadElement) {
EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
EXPECT_EQ(access, ElementAccessOf(op));
EXPECT_EQ(3, op->ValueInputCount());
EXPECT_EQ(2, op->ValueInputCount());
EXPECT_EQ(1, op->EffectInputCount());
EXPECT_EQ(0, op->ControlInputCount());
EXPECT_EQ(1, op->ControlInputCount());
EXPECT_EQ(4, OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(1, op->ValueOutputCount());
......@@ -195,10 +262,10 @@ TEST_P(SimplifiedElementAccessOperatorTest, StoreElement) {
EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
EXPECT_EQ(access, ElementAccessOf(op));
EXPECT_EQ(4, op->ValueInputCount());
EXPECT_EQ(3, op->ValueInputCount());
EXPECT_EQ(1, op->EffectInputCount());
EXPECT_EQ(1, op->ControlInputCount());
EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(0, op->ValueOutputCount());
EXPECT_EQ(1, op->EffectOutputCount());
......
......@@ -98,6 +98,11 @@ TestWithIsolate::~TestWithIsolate() {}
// Convenience accessor for the internal Isolate's Factory.
Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
// Exposes the Isolate's random number generator so tests can draw
// randomized inputs from the same source as the rest of the engine.
base::RandomNumberGenerator* TestWithIsolate::random_number_generator() const {
  return isolate()->random_number_generator();
}
// Trivial out-of-line destructor (presumably anchors the class's emitted
// code to this translation unit -- confirm against the header).
TestWithZone::~TestWithZone() {}
} // namespace internal
......
......@@ -83,6 +83,7 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
Isolate* isolate() const {
return reinterpret_cast<Isolate*>(::v8::TestWithIsolate::isolate());
}
base::RandomNumberGenerator* random_number_generator() const;
private:
DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment