Commit 3d9080a5 authored by marja@chromium.org

Revert "ARM: Use the shifter operand to merge in previous shift instructions."

This reverts r22017.

Reason: broke Nexus 7 GPU bots (see crbug.com/389198)

BUG=389198
LOG=n
TBR=machenbach@chromium.org, ulan@chromium.org

Review URL: https://codereview.chromium.org/359713004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22050 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e25f8334
@@ -281,7 +281,6 @@ Operand::Operand(Handle<Object> handle) {
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
ASSERT(is_uint5(shift_imm));
ASSERT(shift_op != NO_SHIFT);
rm_ = rm;
rs_ = no_reg;
@@ -302,7 +301,7 @@ Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
ASSERT((shift_op != RRX) && (shift_op != NO_SHIFT));
ASSERT(shift_op != RRX);
rm_ = rm;
rs_ = no_reg;
shift_op_ = shift_op;
@@ -958,7 +957,7 @@ void Assembler::next(Label* L) {
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
bool fits_shifter(uint32_t imm32,
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
Instr* instr) {
@@ -966,8 +965,8 @@ bool fits_shifter(uint32_t imm32,
for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
if ((imm8 <= 0xff)) {
if (rotate_imm != NULL) *rotate_imm = rot;
if (immed_8 != NULL) *immed_8 = imm8;
*rotate_imm = rot;
*immed_8 = imm8;
return true;
}
}
@@ -983,8 +982,7 @@ bool fits_shifter(uint32_t imm32,
if (imm32 < 0x10000) {
*instr ^= kMovwLeaveCCFlip;
*instr |= EncodeMovwImmediate(imm32);
if (rotate_imm != NULL) *rotate_imm = 0; // Not used for movw.
if (immed_8 != NULL) *immed_8 = 0; // Not used for movw.
*rotate_imm = *immed_8 = 0; // Not used for movw.
return true;
}
}
......
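
For context on what fits_shifter is probing: an ARM data-processing immediate (Operand2) is an 8-bit value rotated right by an even amount, 2 * rot with rot in [0, 15]. A minimal standalone sketch of that check, with illustrative names and independent of the V8 sources, looks roughly like this:

#include <cstdint>

// Returns true if imm32 is encodable as an ARM Operand2 immediate, i.e. as an
// 8-bit value rotated right by 2 * rot for some rot in [0, 15]. fits_shifter
// above performs the same search, and may additionally rewrite the instruction
// (e.g. into the MOVW form on ARMv7) to make an otherwise unencodable
// immediate fit.
bool IsEncodableImmediate(uint32_t imm32) {
  for (int rot = 0; rot < 16; rot++) {
    // Rotating left by 2 * rot undoes a right rotation of the same amount.
    uint32_t undone = (rot == 0)
        ? imm32
        : (imm32 << (2 * rot)) | (imm32 >> (32 - 2 * rot));
    if (undone <= 0xff) return true;
  }
  return false;
}
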
@@ -1598,10 +1598,6 @@ class Assembler : public AssemblerBase {
};
bool fits_shifter(uint32_t imm32, uint32_t* rotate_imm,
uint32_t* immed_8, Instr* instr);
class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) {
......
@@ -236,7 +236,6 @@ enum ShiftOp {
// as an argument, and will never actually be encoded. The Assembler will
// detect it and emit the correct ROR shift operand with shift_imm == 0.
RRX = -1,
NO_SHIFT = -2,
kNumberOfShifts = 4
};
......
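
The RRX comment above mirrors the hardware encoding: in an Operand2 immediate shift, the ROR shift type with a shift amount of zero is defined to mean RRX (rotate right by one bit through the carry flag), which is why the Assembler can emit RRX as "ROR with shift_imm == 0". A rough decode-side sketch of that rule (illustrative names, not V8 code):

#include <cstdint>

// ARM Operand2 shift-type field: 0 = LSL, 1 = LSR, 2 = ASR, 3 = ROR,
// except that ROR with a zero immediate shift amount encodes RRX instead.
enum class Shift { LSL, LSR, ASR, ROR, RRX };

Shift DecodeImmShift(uint32_t type, uint32_t imm5) {
  if (type == 3 && imm5 == 0) return Shift::RRX;
  return static_cast<Shift>(type);
}
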
@@ -676,127 +676,11 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
}
HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val,
HValue** left) {
if (!val->representation().IsInteger32()) return NULL;
if (!(val->IsBitwise() || val->IsAdd() || val->IsSub())) return NULL;
HBinaryOperation* hinstr = HBinaryOperation::cast(val);
HValue* hleft = hinstr->left();
HValue* hright = hinstr->right();
ASSERT(hleft->representation().Equals(hinstr->representation()));
ASSERT(hright->representation().Equals(hinstr->representation()));
if ((hright->IsConstant() &&
LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
(hinstr->IsCommutative() && hleft->IsConstant() &&
LikelyFitsImmField(hinstr, HConstant::cast(hleft)->Integer32Value()))) {
// The constant operand will likely fit in the immediate field. We are
// better off with
// mov r1, r2 LSL #imm
// add r0, r1, #imm2
// than with
// mov r5, #imm2
// add r0, r5, r2 LSL #imm
return NULL;
}
HBitwiseBinaryOperation* shift = NULL;
// TODO(aleram): We will miss situations where a shift operation is used by
// different instructions both as a left and right operands.
if (hright->IsBitwiseBinaryShift() &&
HBitwiseBinaryOperation::cast(hright)->right()->IsConstant()) {
shift = HBitwiseBinaryOperation::cast(hright);
if (left != NULL) {
*left = hleft;
}
} else if (hinstr->IsCommutative() &&
hleft->IsBitwiseBinaryShift() &&
HBitwiseBinaryOperation::cast(hleft)->right()->IsConstant()) {
shift = HBitwiseBinaryOperation::cast(hleft);
if (left != NULL) {
*left = hright;
}
} else {
return NULL;
}
if ((JSShiftAmountFromHConstant(shift->right()) == 0) && shift->IsShr()) {
// Logical shifts right by zero can deoptimize.
return NULL;
}
return shift;
}
bool LChunkBuilder::ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift) {
if (!shift->representation().IsInteger32()) {
return false;
}
for (HUseIterator it(shift->uses()); !it.Done(); it.Advance()) {
if (shift != CanTransformToShiftedOp(it.value())) {
return false;
}
}
return true;
}
LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand(
HBinaryOperation* instr) {
HValue* left;
HBitwiseBinaryOperation* shift = CanTransformToShiftedOp(instr, &left);
if ((shift != NULL) && ShiftCanBeOptimizedAway(shift)) {
return DoShiftedBinaryOp(instr, left, shift);
}
return NULL;
}
LInstruction* LChunkBuilder::DoShiftedBinaryOp(
HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) {
ASSERT(hshift->IsBitwiseBinaryShift());
ASSERT(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
LTemplateResultInstruction<1>* res;
LOperand* left = UseRegisterAtStart(hleft);
LOperand* right = UseRegisterAtStart(hshift->left());
LOperand* shift_amount = UseConstant(hshift->right());
ShiftOp shift_op;
switch (hshift->opcode()) {
case HValue::kShl: shift_op = LSL; break;
case HValue::kShr: shift_op = LSR; break;
case HValue::kSar: shift_op = ASR; break;
default: UNREACHABLE(); shift_op = NO_SHIFT;
}
if (hinstr->IsBitwise()) {
res = new(zone()) LBitI(left, right, shift_op, shift_amount);
} else if (hinstr->IsAdd()) {
res = new(zone()) LAddI(left, right, shift_op, shift_amount);
} else {
ASSERT(hinstr->IsSub());
res = new(zone()) LSubI(left, right, shift_op, shift_amount);
}
if (hinstr->CheckFlag(HValue::kCanOverflow)) {
AssignEnvironment(res);
}
return DefineAsRegister(res);
}
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (ShiftCanBeOptimizedAway(instr)) {
return NULL;
}
LOperand* left = UseRegisterAtStart(instr->left());
HValue* right_value = instr->right();
@@ -806,7 +690,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (right_value->IsConstant()) {
HConstant* constant = HConstant::cast(right_value);
right = chunk_->DefineConstantOperand(constant);
constant_value = JSShiftAmountFromHConstant(constant);
constant_value = constant->Integer32Value() & 0x1f;
// Left shifts can deoptimize if we shift by > 0 and the result cannot be
// truncated to smi.
if (instr->representation().IsSmi() && constant_value > 0) {
@@ -1366,11 +1250,6 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
ASSERT(instr->right()->representation().Equals(instr->representation()));
ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
if (shifted_operation != NULL) {
return shifted_operation;
}
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
@@ -1654,11 +1533,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
if (shifted_operation != NULL) {
return shifted_operation;
}
if (instr->left()->IsConstant()) {
// If lhs is constant, do reverse subtraction instead.
return DoRSub(instr);
@@ -1726,12 +1600,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
if (shifted_operation != NULL) {
return shifted_operation;
}
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
......
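
The reverted LChunkBuilder code above (CanTransformToShiftedOp, TryDoOpWithShiftedRightOperand, DoShiftedBinaryOp) folded a constant-amount shift into the flexible second operand of a following add, sub, or bitwise op, via the Operand(Register, ShiftOp, int) constructor also removed in this revert. Roughly, inside a code generator where __ expands to the macro-assembler pointer, the transformation turns the first sequence below into the second (registers and shift amount are illustrative):

// Without the merge: the shift result is materialized in a scratch register.
__ mov(r1, Operand(r2, LSL, 3));
__ add(r0, r5, Operand(r1));

// With the merge (what DoShiftedBinaryOp arranged for): the shift rides in
// the second operand, saving one instruction and one register.
__ add(r0, r5, Operand(r2, LSL, 3));

Note that CanTransformToShiftedOp deliberately refused to merge an unsigned shift right by zero, presumably because x >>> 0 can produce an unsigned result outside the signed 32-bit range and therefore needs its own deoptimization point.
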
@@ -1239,14 +1239,7 @@ class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right)
: shift_(NO_SHIFT), shift_amount_(0) {
inputs_[0] = left;
inputs_[1] = right;
}
LBitI(LOperand* left, LOperand* right, ShiftOp shift, LOperand* shift_amount)
: shift_(shift), shift_amount_(shift_amount) {
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -1254,17 +1247,10 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
ShiftOp shift() const { return shift_; }
LOperand* shift_amount() const { return shift_amount_; }
Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
DECLARE_HYDROGEN_ACCESSOR(Bitwise)
protected:
ShiftOp shift_;
LOperand* shift_amount_;
};
@@ -1291,30 +1277,16 @@ class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right)
: shift_(NO_SHIFT), shift_amount_(0) {
inputs_[0] = left;
inputs_[1] = right;
}
LSubI(LOperand* left, LOperand* right, ShiftOp shift, LOperand* shift_amount)
: shift_(shift), shift_amount_(shift_amount) {
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
ShiftOp shift() const { return shift_; }
LOperand* shift_amount() const { return shift_amount_; }
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
protected:
ShiftOp shift_;
LOperand* shift_amount_;
};
@@ -1483,14 +1455,7 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right)
: shift_(NO_SHIFT), shift_amount_(0) {
inputs_[0] = left;
inputs_[1] = right;
}
LAddI(LOperand* left, LOperand* right, ShiftOp shift, LOperand* shift_amount)
: shift_(shift), shift_amount_(shift_amount) {
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -1498,15 +1463,8 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
ShiftOp shift() const { return shift_; }
LOperand* shift_amount() const { return shift_amount_; }
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
protected:
ShiftOp shift_;
LOperand* shift_amount_;
};
@@ -2897,49 +2855,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
void AddInstruction(LInstruction* instr, HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
int JSShiftAmountFromHConstant(HValue* constant) {
return HConstant::cast(constant)->Integer32Value() & 0x1f;
}
bool LikelyFitsImmField(HInstruction* instr, int imm) {
Instr instr_bits;
// All arithmetic and logical operations accept the same range of
// immediates. In some cases though, the operation itself can be changed to
// get a wider effective range of immediates.
if (instr->IsAdd() || instr->IsSub()) {
// ADD and SUB can be exchanged with a negate immediate.
instr_bits = ADD;
} else if (HBitwise::cast(instr)->op() == Token::BIT_AND) {
ASSERT(instr->IsBitwise());
// AND and BIC can be exchanged with an inverted immediate.
instr_bits = AND;
} else {
ASSERT(instr->IsBitwise());
// Use ORR for all other operations, since fits_shifter() can't adapt ORR.
instr_bits = ORR;
}
return fits_shifter(imm, NULL, NULL, &instr_bits);
}
// Indicates if a sequence of the form
// mov r1, r2 LSL #imm
// add r0, r5, r1
// can be replaced with:
// add r0, r5, r2 LSL #imm
// If this is not possible, the function returns NULL. Otherwise it returns a
// pointer to the shift instruction that would be optimized away.
HBitwiseBinaryOperation* CanTransformToShiftedOp(HValue* val,
HValue** left = NULL);
// Checks if all uses of the shift operation can optimize it away.
bool ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift);
// Attempts to merge the binary operation and a previous shift operation into
// a single operation. Returns the merged instruction on success, and NULL
// otherwise.
LInstruction* TryDoOpWithShiftedRightOperand(HBinaryOperation* op);
LInstruction* DoShiftedBinaryOp(HBinaryOperation* instr,
HValue* left,
HBitwiseBinaryOperation* shift);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
......
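
LikelyFitsImmField above probes with ADD, AND, or ORR because fits_shifter, when handed an instruction to patch, can exchange ADD/SUB using a negated immediate and AND/BIC using an inverted immediate, widening the range of constants that encode. The identities behind those exchanges are plain 32-bit arithmetic; a small self-contained check (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t a = 0x12345678;
  // ADD/SUB exchange: adding an immediate equals subtracting its negation, so
  // "add a, #-8" (immediate does not encode) can become "sub a, #8".
  assert(a + 0xfffffff8u == a - 8u);
  // AND/BIC exchange: BIC clears the bits set in its operand, so
  // "and a, #0xffffff00" (does not encode) can become "bic a, #0xff".
  assert((a & 0xffffff00u) == (a & ~0xffu));
  return 0;
}
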
@@ -480,19 +480,6 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
}
template<class LI>
Operand LCodeGen::ToShiftedRightOperand(LOperand* right, LI* shift_info) {
if (shift_info->shift() == NO_SHIFT) {
return ToOperand(right);
} else {
return Operand(
ToRegister(right),
shift_info->shift(),
JSShiftAmountFromLConstant(shift_info->shift_amount()));
}
}
bool LCodeGen::IsSmi(LConstantOperand* op) const {
return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
@@ -1725,13 +1712,11 @@ void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister(instr->result());
Operand right(no_reg);
ASSERT(right_op->IsRegister() || (instr->shift() == NO_SHIFT));
if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, ip));
} else {
ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
right = ToShiftedRightOperand(right_op, instr);
right = ToOperand(right_op);
}
switch (instr->op()) {
@@ -1788,7 +1773,9 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
break;
}
} else {
int shift_count = JSShiftAmountFromLConstant(right_op);
// Mask the right_op operand.
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
case Token::ROR:
if (shift_count != 0) {
@@ -1848,15 +1835,12 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
ASSERT(right->IsRegister() || (instr->shift() == NO_SHIFT));
if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
ASSERT(right->IsRegister() || right->IsConstantOperand());
__ sub(ToRegister(result), ToRegister(left),
ToShiftedRightOperand(right, instr), set_cond);
__ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
@@ -2045,15 +2029,12 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
ASSERT(right->IsRegister() || (instr->shift() == NO_SHIFT));
if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
ASSERT(right->IsRegister() || right->IsConstantOperand());
__ add(ToRegister(result), ToRegister(left),
ToShiftedRightOperand(right, instr), set_cond);
__ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
......
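
Both the removed JSShiftAmountFromLConstant helper and the restored inline masking in DoShiftI apply "& 0x1f" because JavaScript defines shift counts modulo 32: only the low five bits of the right operand are significant. A one-line reminder of that rule (illustrative helper, not part of the tree):

#include <cstdint>

// JS shift semantics: only the low five bits of the count matter, e.g.
// (1 << 33) === 2 in JavaScript because 33 & 0x1f == 1.
int32_t JsShiftLeft(int32_t value, int32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << (count & 0x1f));
}
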
@@ -86,13 +86,6 @@ class LCodeGen: public LCodeGenBase {
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
MemOperand ToHighMemOperand(LOperand* op) const;
template<class LI>
Operand ToShiftedRightOperand(LOperand* right, LI* shift_info);
int JSShiftAmountFromLConstant(LOperand* constant) {
return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
}
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
......