Commit 78881124 authored by verwaest@chromium.org's avatar verwaest@chromium.org

Eliminate intentional conversion from Smi to Int32 in HMul

If not all uses of an arithmetic binary operation can be truncated to Smi, check whether they can be truncated to Int32, which can avoid the minus-zero check

Fixed DoMulI on X64 to use the correct operand size when the representation is Smi

Fixed DoMulI on ARM: the constant right-operand optimization is based on Integer32 rather than the operand's representation.

BUG=
R=verwaest@chromium.org

Review URL: https://chromiumcodereview.appspot.com/22600005

Patch from Weiliang Lin <weiliang.lin2@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16361 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f55ba6b7
......@@ -431,7 +431,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (r.IsDouble()) {
Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
ASSERT(r.IsTagged());
ASSERT(r.IsSmiOrTagged());
__ LoadObject(scratch, literal);
}
return scratch;
......@@ -1584,10 +1584,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
if (right_op->IsConstantOperand() && !can_overflow) {
// Use optimized code for specific constants.
int32_t constant = ToRepresentation(
LConstantOperand::cast(right_op),
instr->hydrogen()->right()->representation());
int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
......
......@@ -48,6 +48,10 @@ void HCanonicalizePhase::Run() {
if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
HInstruction::kTruncatingToSmi)) {
instr->SetFlag(HInstruction::kAllUsesTruncatingToSmi);
} else if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
HInstruction::kTruncatingToInt32)) {
// Avoid redundant minus zero check
instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
}
}
}
......
......@@ -82,24 +82,36 @@ void HInferRepresentationPhase::Run() {
if (done.Contains(i)) continue;
// Check if all uses of all connected phis in this group are truncating.
bool all_uses_everywhere_truncating = true;
bool all_uses_everywhere_truncating_int32 = true;
bool all_uses_everywhere_truncating_smi = true;
for (BitVector::Iterator it(connected_phis[i]);
!it.Done();
it.Advance()) {
int index = it.Current();
all_uses_everywhere_truncating &=
all_uses_everywhere_truncating_int32 &=
phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
all_uses_everywhere_truncating_smi &=
phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToSmi);
done.Add(index);
}
if (all_uses_everywhere_truncating) {
continue; // Great, nothing to do.
if (!all_uses_everywhere_truncating_int32) {
// Clear truncation flag of this group of connected phis.
for (BitVector::Iterator it(connected_phis[i]);
!it.Done();
it.Advance()) {
int index = it.Current();
phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
}
}
// Clear truncation flag of this group of connected phis.
for (BitVector::Iterator it(connected_phis[i]);
!it.Done();
it.Advance()) {
int index = it.Current();
phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
if (!all_uses_everywhere_truncating_smi) {
// Clear truncation flag of this group of connected phis.
for (BitVector::Iterator it(connected_phis[i]);
!it.Done();
it.Advance()) {
int index = it.Current();
phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToSmi);
}
}
}
}
......
......@@ -397,6 +397,18 @@ bool HValue::CheckUsesForFlag(Flag f) const {
}
// Like CheckUsesForFlag(f), but when it returns false it also reports the
// first use found without the flag through |value|, so callers (e.g. the
// representation-changes phase) can trace which use blocked truncation.
bool HValue::CheckUsesForFlag(Flag f, HValue** value) const {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
// Simulates are deopt bookkeeping, not real value uses; skip them.
if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) {
// Report the offending use to the caller.
*value = it.value();
return false;
}
}
return true;
}
bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const {
bool return_value = false;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
......
......@@ -807,6 +807,8 @@ class HValue : public ZoneObject {
// Returns true if the flag specified is set for all uses, false otherwise.
bool CheckUsesForFlag(Flag f) const;
// Same as before and the first one without the flag is returned in value.
bool CheckUsesForFlag(Flag f, HValue** value) const;
// Returns true if the flag specified is set for all uses, and this set
// of uses is non-empty.
bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
......@@ -1545,7 +1547,10 @@ class HChange V8_FINAL : public HUnaryOperation {
ASSERT(!value->representation().Equals(to));
set_representation(to);
SetFlag(kUseGVN);
if (is_truncating_to_smi) SetFlag(kTruncatingToSmi);
if (is_truncating_to_smi) {
SetFlag(kTruncatingToSmi);
SetFlag(kTruncatingToInt32);
}
if (is_truncating_to_int32) SetFlag(kTruncatingToInt32);
if (value->representation().IsSmi() || value->type().IsSmi()) {
set_type(HType::Smi());
......@@ -4514,7 +4519,6 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
......@@ -4724,6 +4728,7 @@ class HBitwise V8_FINAL : public HBitwiseBinaryOperation {
right->representation().IsSmi() &&
HConstant::cast(right)->Integer32Value() >= 0))) {
SetFlag(kTruncatingToSmi);
SetFlag(kTruncatingToInt32);
// BIT_OR with a smi-range negative value will always set the entire
// sign-extension of the smi-sign.
} else if (op == Token::BIT_OR &&
......@@ -4734,6 +4739,7 @@ class HBitwise V8_FINAL : public HBitwiseBinaryOperation {
right->representation().IsSmi() &&
HConstant::cast(right)->Integer32Value() < 0))) {
SetFlag(kTruncatingToSmi);
SetFlag(kTruncatingToInt32);
}
}
......
......@@ -99,7 +99,8 @@ void HRepresentationChangesPhase::Run() {
// int32-phis allow truncation and iteratively remove the ones that
// are used in an operation that does not allow a truncating
// conversion.
ZoneList<HPhi*> worklist(8, zone());
ZoneList<HPhi*> int_worklist(8, zone());
ZoneList<HPhi*> smi_worklist(8, zone());
const ZoneList<HPhi*>* phi_list(graph()->phi_list());
for (int i = 0; i < phi_list->length(); i++) {
......@@ -108,51 +109,64 @@ void HRepresentationChangesPhase::Run() {
phi->SetFlag(HValue::kTruncatingToInt32);
} else if (phi->representation().IsSmi()) {
phi->SetFlag(HValue::kTruncatingToSmi);
phi->SetFlag(HValue::kTruncatingToInt32);
}
}
for (int i = 0; i < phi_list->length(); i++) {
HPhi* phi = phi_list->at(i);
for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
// If a Phi is used as a non-truncating int32 or as a double,
// clear its "truncating" flag.
HValue* use = it.value();
Representation input_representation =
use->RequiredInputRepresentation(it.index());
if ((phi->representation().IsInteger32() &&
!(input_representation.IsInteger32() &&
use->CheckFlag(HValue::kTruncatingToInt32))) ||
(phi->representation().IsSmi() &&
!(input_representation.IsSmi() &&
use->CheckFlag(HValue::kTruncatingToSmi)))) {
HValue* value = NULL;
if (phi->representation().IsSmiOrInteger32() &&
!phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
int_worklist.Add(phi, zone());
phi->ClearFlag(HValue::kTruncatingToInt32);
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
phi->id(), value->id(), value->Mnemonic());
}
}
if (phi->representation().IsSmi() &&
!phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
smi_worklist.Add(phi, zone());
phi->ClearFlag(HValue::kTruncatingToSmi);
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
phi->id(), value->id(), value->Mnemonic());
}
}
}
while (!int_worklist.is_empty()) {
HPhi* current = int_worklist.RemoveLast();
for (int i = 0; i < current->OperandCount(); ++i) {
HValue* input = current->OperandAt(i);
if (input->IsPhi() &&
input->representation().IsSmiOrInteger32() &&
input->CheckFlag(HValue::kTruncatingToInt32)) {
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating because of #%d %s\n",
phi->id(), it.value()->id(), it.value()->Mnemonic());
PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
input->id(), current->id(), current->Mnemonic());
}
phi->ClearFlag(HValue::kTruncatingToInt32);
phi->ClearFlag(HValue::kTruncatingToSmi);
worklist.Add(phi, zone());
break;
input->ClearFlag(HValue::kTruncatingToInt32);
int_worklist.Add(HPhi::cast(input), zone());
}
}
}
while (!worklist.is_empty()) {
HPhi* current = worklist.RemoveLast();
while (!smi_worklist.is_empty()) {
HPhi* current = smi_worklist.RemoveLast();
for (int i = 0; i < current->OperandCount(); ++i) {
HValue* input = current->OperandAt(i);
if (input->IsPhi() &&
((input->representation().IsInteger32() &&
input->CheckFlag(HValue::kTruncatingToInt32)) ||
(input->representation().IsSmi() &&
input->CheckFlag(HValue::kTruncatingToSmi)))) {
input->representation().IsSmi() &&
input->CheckFlag(HValue::kTruncatingToSmi)) {
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating because of #%d %s\n",
PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
input->id(), current->id(), current->Mnemonic());
}
input->ClearFlag(HValue::kTruncatingToInt32);
input->ClearFlag(HValue::kTruncatingToSmi);
worklist.Add(HPhi::cast(input), zone());
smi_worklist.Add(HPhi::cast(input), zone());
}
}
}
......
......@@ -4722,6 +4722,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
}
// Returns the true/false oddball indicating whether the argument, converted
// to Int32, fits in a Smi on this platform (31-bit payload on ia32/arm,
// 32-bit on x64). Exposed to tests via %IsValidSmi(number).
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsValidSmi) {
HandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
// ToBoolean picks the heap's true/false oddball directly, replacing the
// hand-written if/else over true_value()/false_value().
return isolate->heap()->ToBoolean(Smi::IsValid(number));
}
// Returns a single character string where first character equals
// string->Get(index).
static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
......
......@@ -221,7 +221,8 @@ namespace internal {
F(NumberToRadixString, 2, 1) \
F(NumberToFixed, 2, 1) \
F(NumberToExponential, 2, 1) \
F(NumberToPrecision, 2, 1)
F(NumberToPrecision, 2, 1) \
F(IsValidSmi, 1, 1)
#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
......
......@@ -1299,7 +1299,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
LOperand* right = instr->right();
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ movl(kScratchRegister, left);
if (instr->hydrogen_value()->representation().IsSmi()) {
__ movq(kScratchRegister, left);
} else {
__ movl(kScratchRegister, left);
}
}
bool can_overflow =
......@@ -1347,14 +1351,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger32(left, left);
__ SmiToInteger64(left, left);
__ imul(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger32(left, left);
__ SmiToInteger64(left, left);
__ imul(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
......@@ -1368,9 +1372,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
Label done;
__ testl(left, left);
if (instr->hydrogen_value()->representation().IsSmi()) {
__ testq(left, left);
} else {
__ testl(left, left);
}
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
// Constant can't be represented as Smi due to immediate size limit.
ASSERT(!instr->hydrogen_value()->representation().IsSmi());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
......@@ -1378,11 +1388,19 @@ void LCodeGen::DoMulI(LMulI* instr) {
DeoptimizeIf(less, instr->environment());
}
} else if (right->IsStackSlot()) {
__ orl(kScratchRegister, ToOperand(right));
if (instr->hydrogen_value()->representation().IsSmi()) {
__ or_(kScratchRegister, ToOperand(right));
} else {
__ orl(kScratchRegister, ToOperand(right));
}
DeoptimizeIf(sign, instr->environment());
} else {
// Test the non-zero operand for negative sign.
__ orl(kScratchRegister, ToRegister(right));
if (instr->hydrogen_value()->representation().IsSmi()) {
__ or_(kScratchRegister, ToRegister(right));
} else {
__ orl(kScratchRegister, ToRegister(right));
}
DeoptimizeIf(sign, instr->environment());
}
__ bind(&done);
......
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --noalways-opt
// Multiplies two numbers. Kept as a bare multiply so Crankshaft compiles it
// to a single HMul; the assertions below rely on its minus-zero deopt path.
function mul(a, b) {
return a * b;
}
// Warm up with Smi inputs so type feedback records small-integer operands.
mul(-1, 2);
mul(-1, 2);
%OptimizeFunctionOnNextCall(mul);
// Optimized code handles a plain negative result without deopting.
assertEquals(-2, mul(-1, 2));
assertOptimized(mul);
// Deopt on minus zero.
assertEquals(-0, mul(-1, 0));
assertUnoptimized(mul);
// Second multiply function with its own type feedback, used to exercise the
// overflow deopt at the platform-specific Smi boundary.
function mul2(a, b) {
return a * b;
}
// Warm up, optimize, then force a result just past the Smi range so the
// optimized multiply must deopt on overflow.
mul2(-1, 2);
mul2(-1, 2);
%OptimizeFunctionOnNextCall(mul2);
// 2^30 is a smi boundary on arm and ia32.
var two_30 = 1 << 30;
// 2^31 is a smi boundary on x64.
var two_31 = 2 * two_30;
if (%IsValidSmi(two_31)) {
// Deopt on two_31 on x64.
assertEquals(two_31, mul2(-two_31, -1));
assertUnoptimized(mul2);
} else {
// Deopt on two_30 on ia32.
assertEquals(two_30, mul2(-two_30, -1));
assertUnoptimized(mul2);
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment