ARM: Tweak StoreKeyed.

Avoid corrupting its input in some cases.

BUG=none
TEST=test/mjsunit/lithium/StoreKeyed*.js
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/23600054

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16771 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f3434003
......@@ -2223,8 +2223,6 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
......@@ -2234,15 +2232,19 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
val = UseTempRegister(instr->value());
val = UseRegister(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(instr->value()->representation().IsSmiOrTagged());
object = UseTempRegister(instr->elements());
val = needs_write_barrier ? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
key = needs_write_barrier ? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
if (needs_write_barrier) {
object = UseTempRegister(instr->elements());
val = UseTempRegister(instr->value());
key = UseTempRegister(instr->key());
} else {
object = UseRegisterAtStart(instr->elements());
val = UseRegisterAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
}
}
return new(zone()) LStoreKeyed(object, key, val);
......@@ -2250,17 +2252,13 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
(instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
(instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->elements()->representation().IsExternal());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
return new(zone()) LStoreKeyed(external_pointer, key, val);
......
......@@ -4312,16 +4312,23 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
Operand operand(key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (key_is_constant) {
if (constant_key != 0) {
__ add(address, external_pointer,
Operand(constant_key << element_size_shift));
} else {
address = external_pointer;
}
} else {
__ add(address, external_pointer, Operand(key, LSL, shift_size));
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
__ vstr(double_scratch0().low(), scratch0(), additional_offset);
__ vstr(double_scratch0().low(), address, additional_offset);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vstr(value, scratch0(), additional_offset);
__ vstr(value, address, additional_offset);
}
} else {
Register value(ToRegister(instr->value()));
......@@ -4363,32 +4370,28 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
Register scratch = scratch0();
DwVfpRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
// Calculate the effective address of the slot in the array to store the
// double value.
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
__ add(scratch, elements,
Operand((constant_key << element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
Operand operand = key_is_constant
? Operand((constant_key << element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
if (!key_is_constant) {
__ add(scratch, scratch,
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
__ add(scratch, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ add(scratch, scratch,
Operand(ToRegister(instr->key()), LSL, shift_size));
}
if (instr->NeedsCanonicalization()) {
......@@ -4398,9 +4401,12 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
__ Assert(ne, kDefaultNaNModeNotSet);
}
__ VFPCanonicalizeNaN(value);
__ VFPCanonicalizeNaN(double_scratch, value);
__ vstr(double_scratch, scratch,
instr->additional_index() << element_size_shift);
} else {
__ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
__ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
......
......@@ -733,9 +733,11 @@ void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
bind(&fpscr_done);
}
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value,
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
vsub(value, value, kDoubleRegZero, cond);
vsub(dst, src, kDoubleRegZero, cond);
}
......
......@@ -469,8 +469,13 @@ class MacroAssembler: public Assembler {
void VFPEnsureFPSCRState(Register scratch);
// If the value is a NaN, canonicalize the value else, do nothing.
void VFPCanonicalizeNaN(const DwVfpRegister value,
void VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void VFPCanonicalizeNaN(const DwVfpRegister value,
const Condition cond = al) {
VFPCanonicalizeNaN(value, value, cond);
}
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
......
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --no-use-osr
// Regression helper for the ARM lithium StoreKeyed instruction on a
// double-elements array: stores v once at a constant key (0) and once at a
// variable key (i). When optimized, the constant-key and register-key store
// paths are both exercised; the store must not corrupt its double input
// (see commit message: "Avoid corrupting its input in some cases").
// NOTE(review): keep the two stores exactly in this shape/order — the
// statement pattern is what selects the code paths under test.
function foo(a, i, v) {
a[0] = v;
a[i] = v;
}
// Same store pattern as foo, but driven with integer (smi) values by the
// callers below, so the optimized code takes the smi/tagged StoreKeyed path
// rather than the double-elements path.
// NOTE(review): keep the two stores exactly in this shape/order — the
// statement pattern is what selects the code paths under test.
function foo_int(a, i, v) {
a[0] = v;
a[i] = v;
}
// Three identical double arrays: A1 and A2 are used by the unoptimized runs
// (A1/A2 warm up type feedback), A3 is written by the optimized code.
var A1 = [1.2, 2.3];
var A2 = [1.2, 2.3];
var A3 = [1.2, 2.3];
// Same setup with smi elements for the integer variant.
var A1_int = [12, 23];
var A2_int = [12, 23];
var A3_int = [12, 23];
// Warm up foo twice, force optimization (%OptimizeFunctionOnNextCall is
// natives syntax, enabled by --allow-natives-syntax above), then run the
// optimized code against A3.
foo(A1, 1, 3.4);
foo(A2, 1, 3.4);
%OptimizeFunctionOnNextCall(foo);
foo(A3, 1, 3.4);
// Same warm-up/optimize/verify protocol for the integer variant.
foo_int(A1_int, 1, 34);
foo_int(A2_int, 1, 34);
%OptimizeFunctionOnNextCall(foo_int);
foo_int(A3_int, 1, 34);
// The optimized stores (into A3*) must produce the same values as the
// unoptimized stores (into A1*); mjsunit's assertEquals checks each slot.
assertEquals(A1[0], A3[0]);
assertEquals(A1[1], A3[1]);
assertEquals(A1_int[0], A3_int[0]);
assertEquals(A1_int[1], A3_int[1]);
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --no-use-osr
// Regression helper for StoreKeyed into an external (typed) array: stores v
// at a constant key (0) and at a variable key (i). Each foo_* variant below
// is a separate function so each gets its own type feedback and optimized
// code for one external elements kind (pixel/uint16/uint32/float/double).
// NOTE(review): keep the two stores exactly in this shape/order — the
// statement pattern is what selects the code paths under test.
function foo_pixel(a, i, v) {
a[0] = v;
a[i] = v;
}
// StoreKeyed into a Uint16Array (external uint16 elements); see foo_pixel
// for why each elements kind gets its own function.
function foo_uint16(a, i, v) {
a[0] = v;
a[i] = v;
}
// StoreKeyed into a Uint32Array (external uint32 elements); see foo_pixel
// for why each elements kind gets its own function.
function foo_uint32(a, i, v) {
a[0] = v;
a[i] = v;
}
// StoreKeyed into a Float32Array (external float elements): the optimized
// store must narrow the double value to float32 without corrupting the
// original double input register (the bug this test guards against).
function foo_float(a, i, v) {
a[0] = v;
a[i] = v;
}
// StoreKeyed into a Float64Array (external double elements); see foo_pixel
// for why each elements kind gets its own function.
function foo_double(a, i, v) {
a[0] = v;
a[i] = v;
}
// For each external elements kind, three identical 2-element typed arrays:
// A1_*/A2_* are written by the unoptimized runs (warming up type feedback),
// A3_* is written by the optimized code so the results can be compared.
var A1_pixel = new Uint8ClampedArray(2);
var A2_pixel = new Uint8ClampedArray(2);
var A3_pixel = new Uint8ClampedArray(2);
var A1_uint16 = new Uint16Array(2);
var A2_uint16 = new Uint16Array(2);
var A3_uint16 = new Uint16Array(2);
var A1_uint32 = new Uint32Array(2);
var A2_uint32 = new Uint32Array(2);
var A3_uint32 = new Uint32Array(2);
var A1_float = new Float32Array(2);
var A2_float = new Float32Array(2);
var A3_float = new Float32Array(2);
var A1_double = new Float64Array(2);
var A2_double = new Float64Array(2);
var A3_double = new Float64Array(2);
// For each variant: warm up twice, force optimization via natives syntax
// (%OptimizeFunctionOnNextCall, enabled by --allow-natives-syntax above),
// then run the optimized code against the A3_* array. Non-integer values
// (3.4) are stored into the integer-typed arrays on purpose, so the store
// must perform a double-to-int conversion of the double input.
foo_pixel(A1_pixel, 1, 34);
foo_pixel(A2_pixel, 1, 34);
%OptimizeFunctionOnNextCall(foo_pixel);
foo_pixel(A3_pixel, 1, 34);
foo_uint16(A1_uint16, 1, 3.4);
foo_uint16(A2_uint16, 1, 3.4);
%OptimizeFunctionOnNextCall(foo_uint16);
foo_uint16(A3_uint16, 1, 3.4);
foo_uint32(A1_uint32, 1, 3.4);
foo_uint32(A2_uint32, 1, 3.4);
%OptimizeFunctionOnNextCall(foo_uint32);
foo_uint32(A3_uint32, 1, 3.4);
foo_float(A1_float, 1, 3.4);
foo_float(A2_float, 1, 3.4);
%OptimizeFunctionOnNextCall(foo_float);
foo_float(A3_float, 1, 3.4);
foo_double(A1_double, 1, 3.4);
foo_double(A2_double, 1, 3.4);
%OptimizeFunctionOnNextCall(foo_double);
foo_double(A3_double, 1, 3.4);
// The optimized stores (A3_*) must match the unoptimized stores (A1_*):
// mjsunit's assertEquals compares both slots for every elements kind.
assertEquals(A1_pixel[0], A3_pixel[0]);
assertEquals(A1_pixel[1], A3_pixel[1]);
assertEquals(A1_uint16[0], A3_uint16[0]);
assertEquals(A1_uint16[1], A3_uint16[1]);
assertEquals(A1_uint32[0], A3_uint32[0]);
assertEquals(A1_uint32[1], A3_uint32[1]);
assertEquals(A1_float[0], A3_float[0]);
assertEquals(A1_float[1], A3_float[1]);
assertEquals(A1_double[0], A3_double[0]);
assertEquals(A1_double[1], A3_double[1]);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment