Commit 106a8325 authored by vegorov@chromium.org

Fix DoDeferredNumberTagU to keep the value in xmm1 instead of xmm0 on x64.

xmm0 is not saved across runtime calls on x64 because MacroAssembler::EnterExitFrameEpilogue preserves only allocatable XMM registers, unlike on ia32 where it preserves all XMM registers.
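
As a rough illustration of the scenario this guards against, here is a minimal JavaScript sketch (the names Tag and u32 are hypothetical; it mirrors the regression test added to uint32.js below):

  var u32 = new Uint32Array(1);
  u32[0] = 0xFFFFFFFF;                 // too large for a Smi, must be boxed as a heap number
  function Tag(a, i) { return a[i]; }  // optimized code has to tag the uint32 element
  for (var i = 0; i < 100000; i++) {
    // Repeated boxing eventually makes inline allocation fail, so the deferred
    // code calls Runtime::kAllocateHeapNumber. On x64 a value parked in xmm0
    // would be clobbered by that call, while xmm1 is preserved.
    if (Tag(u32, 0) !== 0xFFFFFFFF) throw "untagged value was corrupted";
  }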

Clean up handling of shifts: SHR can deoptimize only when it is a shift by 0; all other shifts never deoptimize.
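
A small JavaScript illustration of why only the shift-by-zero case is special: shift amounts are taken mod 32, and any non-zero amount clears the top bit, so only x >>> 0 can yield a value outside the int32 range.

  (0xFFFFFFFF >>> 0);  // 4294967295 -- exceeds int32 and may require a deopt if it
                       // cannot stay an untagged uint32 or be truncated by its uses
  (0xFFFFFFFF >>> 1);  // 2147483647 -- any non-zero shift amount always fits in int32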

Fix type inference for the i-to-t change instruction. On x64 this ensures that the write barrier is generated correctly.
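
To sketch why the inferred type of the i-to-t (integer-to-tagged) change matters (hypothetical names, mirroring the new FillOldArrayWithHeapNumbers test below): if a uint32 value boxed into a heap number were wrongly typed as Smi, the store below could omit the write barrier even though it writes a freshly allocated new-space object into an old-space array.

  var old_array = new Array(100);           // assume the GC has promoted this array to old space
  var u32 = new Uint32Array([0xF0000000]);  // element does not fit in a Smi
  function Fill(n) {
    for (var i = 0; i < n; i++) {
      old_array[i] = u32[0];  // stored value is a boxed heap number; the store needs a write barrier
    }
  }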

R=danno@chromium.org

Review URL: https://chromiumcodereview.appspot.com/10868032

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12373 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 6095c38f
@@ -713,15 +713,13 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
     right = UseRegisterAtStart(right_value);
   }
 
+  // Shift operations can only deoptimize if we do a logical shift
+  // by 0 and the result cannot be truncated to int32.
   bool does_deopt = false;
-  if (FLAG_opt_safe_uint32_operations) {
-    does_deopt = !instr->CheckFlag(HInstruction::kUint32);
-  } else {
-    // Shift operations can only deoptimize if we do a logical shift
-    // by 0 and the result cannot be truncated to int32.
-    bool may_deopt = (op == Token::SHR && constant_value == 0);
-    if (may_deopt) {
+  if (op == Token::SHR && constant_value == 0) {
+    if (FLAG_opt_safe_uint32_operations) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    } else {
       for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
         if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
           does_deopt = true;
...
@@ -1124,6 +1124,7 @@ Range* HChange::InferRange(Zone* zone) {
   Range* input_range = value()->range();
   if (from().IsInteger32() &&
       to().IsTagged() &&
+      !value()->CheckFlag(HInstruction::kUint32) &&
       input_range != NULL && input_range->IsInSmiRange()) {
     set_type(HType::Smi());
   }
...
@@ -8270,7 +8270,16 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
     case Token::SHR:
       instr = HShr::NewHShr(zone(), context, left, right);
       if (FLAG_opt_safe_uint32_operations && instr->IsShr()) {
-        graph()->RecordUint32Instruction(instr);
+        bool can_be_shift_by_zero = true;
+        if (right->IsConstant()) {
+          HConstant* right_const = HConstant::cast(right);
+          if (right_const->HasInteger32Value() &&
+              (right_const->Integer32Value() & 0x1f) != 0) {
+            can_be_shift_by_zero = false;
+          }
+        }
+
+        if (can_be_shift_by_zero) graph()->RecordUint32Instruction(instr);
       }
       break;
     case Token::SHL:
...
@@ -738,15 +738,13 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
     right = UseFixed(right_value, ecx);
   }
 
+  // Shift operations can only deoptimize if we do a logical shift by 0 and
+  // the result cannot be truncated to int32.
   bool does_deopt = false;
-  if (FLAG_opt_safe_uint32_operations) {
-    does_deopt = !instr->CheckFlag(HInstruction::kUint32);
-  } else {
-    // Shift operations can only deoptimize if we do a logical shift by 0 and
-    // the result cannot be truncated to int32.
-    bool may_deopt = (op == Token::SHR && constant_value == 0 &&
-                      !instr->CheckFlag(HInstruction::kUint32));
-    if (may_deopt) {
+  if (op == Token::SHR && constant_value == 0) {
+    if (FLAG_opt_safe_uint32_operations) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    } else {
       for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
         if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
           does_deopt = true;
...
@@ -4075,7 +4075,10 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
   PushSafepointRegistersScope scope(this);
 
   Label done;
-  __ LoadUint32(xmm0, reg, xmm1);
+  // Load value into xmm1 which will be preserved across potential call to
+  // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
+  // XMM registers on x64).
+  __ LoadUint32(xmm1, reg, xmm0);
 
   if (FLAG_inline_new) {
     __ AllocateHeapNumber(reg, tmp, &slow);
@@ -4093,10 +4096,10 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
     if (!reg.is(rax)) __ movq(reg, rax);
 
-  // Done. Put the value in xmm0 into the value of the allocated heap
+  // Done. Put the value in xmm1 into the value of the allocated heap
   // number.
   __ bind(&done);
-  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1);
   __ StoreToSafepointRegisterSlot(reg, reg);
 }
...
@@ -721,11 +721,10 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
   // Shift operations can only deoptimize if we do a logical shift by 0 and
   // the result cannot be truncated to int32.
   bool does_deopt = false;
-  if (FLAG_opt_safe_uint32_operations) {
-    does_deopt = !instr->CheckFlag(HInstruction::kUint32);
-  } else {
-    bool may_deopt = (op == Token::SHR && constant_value == 0);
-    if (may_deopt) {
+  if (op == Token::SHR && constant_value == 0) {
+    if (FLAG_opt_safe_uint32_operations) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    } else {
       for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
         if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
           does_deopt = true;
...
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --expose-gc
 
 // Test uint32 handing in optimized frames.
@@ -44,7 +44,10 @@ assertEquals(K1, ChangeI2T(uint32_array, 0));
 assertEquals(K2, ChangeI2T(uint32_array, 1));
 %OptimizeFunctionOnNextCall(ChangeI2T);
 assertEquals(K1, ChangeI2T(uint32_array, 0));
-assertEquals(K2, ChangeI2T(uint32_array, 1));
+// Loop to force inline allocation failure and a call into runtime.
+for (var i = 0; i < 80000; i++) {
+  assertEquals(K2, ChangeI2T(uint32_array, 1));
+}
 
 function SideEffect() {
   with ({}) { }  // not inlinable
@@ -148,3 +151,23 @@ assertEquals(2, PhiOfPhiUnsafe(1));
 assertEquals(2, PhiOfPhiUnsafe(1));
 %OptimizeFunctionOnNextCall(PhiOfPhiUnsafe);
 assertEquals(2 * K3, PhiOfPhiUnsafe(K3));
+
+var old_array = new Array(1000);
+for (var i = 0; i < old_array.length; i++) old_array[i] = null;
+
+// Force promotion.
+gc();
+gc();
+
+function FillOldArrayWithHeapNumbers(N) {
+  for (var i = 0; i < N; i++) {
+    old_array[i] = uint32_array[1];
+  }
+}
+
+FillOldArrayWithHeapNumbers(1);
+FillOldArrayWithHeapNumbers(1);
+%OptimizeFunctionOnNextCall(FillOldArrayWithHeapNumbers);
+FillOldArrayWithHeapNumbers(old_array.length);
+gc();