Commit cc278040 authored by yangguo@chromium.org

Ensure the use of byte registers for byte instructions on ia32 and x64.

BUG=v8:1945
TEST=regress-1945.js

Review URL: https://chromiumcodereview.appspot.com/9418005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10719 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent dd95fee0
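Some context on the bug class this commit guards against: on ia32 only eax, ebx, ecx and edx have byte variants (al, bl, cl, dl); in the instruction encoding, the 3-bit byte-register field reuses codes 4..7 for ah, ch, dh, bh. A byte instruction emitted with, say, esi (code 6) therefore silently operates on dh. Below is a minimal standalone sketch of the dec_b case (my own illustration built from the emitter in the diff, not V8 code):

// Sketch, not V8 code: models how dec_b mis-encodes for a register
// without a byte form. dec_b emits 0xFE, then ModRM 0xC8 | code.
#include <cstdio>

static const char* const kByteRegNames[8] = {
    "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"};
static const char* const kDwordRegNames[8] = {
    "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"};

// Mirrors the predicate the patch introduces.
bool is_byte_register(unsigned code) { return code <= 3; }

void show_dec_b(unsigned code) {
  unsigned modrm = 0xC8u | code;  // opcode extension /1 = dec
  printf("dec_b(%s) -> FE %02X, i.e. 'dec %s'%s\n",
         kDwordRegNames[code], modrm, kByteRegNames[code],
         is_byte_register(code) ? "" : "  <-- wrong register");
}

int main() {
  show_dec_b(1);  // ecx: encodes dec cl, as intended
  show_dec_b(6);  // esi: encodes dec dh -- the silent corruption fixed here
}

This is presumably also why the patch promotes ASSERTs to CHECKs: an abort in release builds beats silently clobbering an unrelated register.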
@@ -32,7 +32,7 @@
 // The original source code covered by the above license above has been modified
 // significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 #include "v8.h"
@@ -575,7 +575,7 @@ void Assembler::leave() {
 void Assembler::mov_b(Register dst, const Operand& src) {
-  ASSERT(dst.code() < 4);
+  CHECK(dst.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x8A);
   emit_operand(dst, src);
@@ -591,7 +591,7 @@ void Assembler::mov_b(const Operand& dst, int8_t imm8) {
 void Assembler::mov_b(const Operand& dst, Register src) {
-  ASSERT(src.code() < 4);
+  CHECK(src.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x88);
   emit_operand(src, dst);
@@ -829,7 +829,7 @@ void Assembler::cmpb(const Operand& op, int8_t imm8) {
 void Assembler::cmpb(const Operand& op, Register reg) {
-  ASSERT(reg.is_byte_register());
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x38);
   emit_operand(reg, op);
@@ -837,7 +837,7 @@ void Assembler::cmpb(const Operand& op, Register reg) {
 void Assembler::cmpb(Register reg, const Operand& op) {
-  ASSERT(reg.is_byte_register());
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x3A);
   emit_operand(reg, op);
@@ -901,6 +901,7 @@ void Assembler::cmpw_ax(const Operand& op) {
 void Assembler::dec_b(Register dst) {
+  CHECK(dst.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0xFE);
   EMIT(0xC8 | dst.code());
@@ -1174,7 +1175,9 @@ void Assembler::test(Register reg, const Immediate& imm) {
   EnsureSpace ensure_space(this);
   // Only use test against byte for registers that have a byte
   // variant: eax, ebx, ecx, and edx.
-  if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
+  if (imm.rmode_ == RelocInfo::NONE &&
+      is_uint8(imm.x_) &&
+      reg.is_byte_register()) {
     uint8_t imm8 = imm.x_;
     if (reg.is(eax)) {
       EMIT(0xA8);
@@ -1204,6 +1207,7 @@ void Assembler::test(Register reg, const Operand& op) {
 void Assembler::test_b(Register reg, const Operand& op) {
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x84);
   emit_operand(reg, op);
@@ -1219,7 +1223,7 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
 void Assembler::test_b(const Operand& op, uint8_t imm8) {
-  if (op.is_reg_only() && op.reg().code() >= 4) {
+  if (op.is_reg_only() && !op.reg().is_byte_register()) {
     test(op, Immediate(imm8));
     return;
   }
...
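The test_b fallback at the end of that file leans on an equivalence: with the 8-bit mask zero-extended into a 32-bit immediate, test r32, imm32 ANDs exactly the same bits as test r8, imm8, so the zero flag agrees (the sign flag does not, so this is safe only for equality/zero branches; the sketch below assumes that is all callers rely on). A quick self-check of the ZF equivalence, in plain C++:

// Sketch: ZF from "test r32, zero-extended imm8" matches ZF from
// "test r8, imm8" for every value of the low byte.
#include <cassert>
#include <cstdint>

bool zf_test32(uint32_t reg, uint32_t imm) { return (reg & imm) == 0; }
bool zf_test8(uint8_t low_byte, uint8_t imm8) { return (low_byte & imm8) == 0; }

int main() {
  for (uint32_t reg = 0; reg < 0x20000; ++reg) {
    for (uint32_t imm8 = 0; imm8 < 256; imm8 += 37) {
      assert(zf_test32(reg, imm8) ==
             zf_test8(static_cast<uint8_t>(reg), static_cast<uint8_t>(imm8)));
    }
  }
  return 0;
}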
@@ -1868,10 +1868,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
   // Faster code path to avoid two compares: subtract lower bound from the
   // actual type and do a signed compare with the width of the type range.
   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-  __ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+  __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
   __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  __ cmpb(Operand(temp2),
-          static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
-                              FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                                   FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ j(above, is_false);
 }
@@ -4079,7 +4078,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   } else {
     __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
     __ and_(temp, mask);
-    __ cmpb(Operand(temp), tag);
+    __ cmp(temp, tag);
     DeoptimizeIf(not_equal, instr->environment());
   }
 }
...
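The EmitClassOfTest hunk keeps V8's single-compare range check: subtract the lower bound, then do one unsigned compare against the range width; values below the bound wrap to large unsigned numbers, so the single "above" branch rejects both out-of-range sides. A sketch of that identity (my illustration; the bounds are stand-ins):

// in_range(t, first, last) == (first <= t && t <= last), with one
// unsigned compare: (t - first) wraps below first, so "above" rejects
// both too-small and too-large values.
#include <cassert>
#include <cstdint>

bool in_range(uint32_t type, uint32_t first, uint32_t last) {
  return type - first <= last - first;  // sub + cmp + !(above)
}

int main() {
  assert(in_range(5, 3, 7));
  assert(!in_range(2, 3, 7));  // 2 - 3 wraps to 0xFFFFFFFF: "above"
  assert(!in_range(9, 3, 7));
  return 0;
}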
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -775,7 +775,7 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
                                           Register dst,
                                           Immediate src) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     // Use 64-bit mode byte registers.
     emit_rex_64(dst);
   }
@@ -1059,7 +1059,7 @@ void Assembler::decl(const Operand& dst) {
 void Assembler::decb(Register dst) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst);
   }
@@ -1387,7 +1387,7 @@ void Assembler::leave() {
 void Assembler::movb(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst, src);
   } else {
@@ -1400,7 +1400,7 @@ void Assembler::movb(Register dst, const Operand& src) {
 void Assembler::movb(Register dst, Immediate imm) {
   EnsureSpace ensure_space(this);
-  if (dst.code() > 3) {
+  if (!dst.is_byte_register()) {
     emit_rex_32(dst);
   }
   emit(0xB0 + dst.low_bits());
@@ -1410,7 +1410,7 @@ void Assembler::movb(Register dst, Immediate imm) {
 void Assembler::movb(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  if (src.code() > 3) {
+  if (!src.is_byte_register()) {
     emit_rex_32(src, dst);
   } else {
     emit_optional_rex_32(src, dst);
@@ -1931,7 +1931,7 @@ void Assembler::setcc(Condition cc, Register reg) {
   }
   EnsureSpace ensure_space(this);
   ASSERT(is_uint4(cc));
-  if (reg.code() > 3) {  // Use x64 byte registers, where different.
+  if (!reg.is_byte_register()) {  // Use x64 byte registers, where different.
     emit_rex_32(reg);
   }
   emit(0x0F);
@@ -1996,7 +1996,7 @@ void Assembler::testb(Register dst, Register src) {
     emit(0x84);
     emit_modrm(src, dst);
   } else {
-    if (dst.code() > 3 || src.code() > 3) {
+    if (!dst.is_byte_register() || !src.is_byte_register()) {
       // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
       emit_rex_32(dst, src);
     }
@@ -2013,7 +2013,7 @@ void Assembler::testb(Register reg, Immediate mask) {
     emit(0xA8);
     emit(mask.value_);  // Low byte emitted.
   } else {
-    if (reg.code() > 3) {
+    if (!reg.is_byte_register()) {
       // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
       emit_rex_32(reg);
     }
@@ -2036,7 +2036,7 @@ void Assembler::testb(const Operand& op, Immediate mask) {
 void Assembler::testb(const Operand& op, Register reg) {
   EnsureSpace ensure_space(this);
-  if (reg.code() > 3) {
+  if (!reg.is_byte_register()) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(reg, op);
   } else {
...
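On x64 the same four registers are special for a different reason: without a REX prefix, byte-register encodings 4..7 mean ah, ch, dh, bh; once any REX prefix is present they mean spl, bpl, sil, dil instead, and REX.B/REX.R extend the range to r8b..r15b. That is why each rewritten condition above emits a REX via emit_rex_32 exactly when !is_byte_register(). A standalone decoding sketch (my illustration, not V8 code; tables per the AMD64 manual):

// Sketch: what a byte-register encoding means with and without REX.
#include <cstdio>

const char* byte_reg_name(int code, bool rex_present) {
  static const char* const no_rex[8] = {
      "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"};
  static const char* const with_rex[16] = {
      "al",  "cl",  "dl",   "bl",   "spl",  "bpl",  "sil",  "dil",
      "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"};
  return rex_present ? with_rex[code] : no_rex[code & 7];
}

int main() {
  printf("code 6, no REX:   %s\n", byte_reg_name(6, false));  // dh
  printf("code 6, with REX: %s\n", byte_reg_name(6, true));   // sil
}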
@@ -30,7 +30,7 @@
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // A lightweight X64 Assembler.
@@ -131,6 +131,8 @@ struct Register {
   }
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
+  // rax, rbx, rcx and rdx are byte registers, the rest are not.
+  bool is_byte_register() const { return code_ <= 3; }
   int code() const {
     ASSERT(is_valid());
     return code_;
...
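For illustration, the new predicate together with the register numbering it depends on (rax=0, rcx=1, rdx=2, rbx=3, then rsp and the rest), in a cut-down hypothetical form:

// Hypothetical stand-in for the struct above; only the parts needed here.
struct Reg {
  int code_;
  // rax, rbx, rcx and rdx are byte registers, the rest are not.
  bool is_byte_register() const { return code_ <= 3; }
};

int main() {
  Reg rdx = {2};  // has a byte form: dl
  Reg rsi = {6};  // no byte form without REX; sil needs the prefix
  return (rdx.is_byte_register() && !rsi.is_byte_register()) ? 0 : 1;
}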
@@ -1793,11 +1793,10 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
   // Faster code path to avoid two compares: subtract lower bound from the
   // actual type and do a signed compare with the width of the type range.
   __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
-  __ movq(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
-  __ subb(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  __ cmpb(temp2,
-          Immediate(static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
-                                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)));
+  __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+  __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ j(above, is_false);
 }
...
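Here the byte-sized ops are replaced rather than guarded: movzxbl zero-extends the one-byte instance type, so the following subq/cmpq see exactly the byte value with known-zero upper bits, and temp2 no longer needs a byte form at all. A tiny sketch of why the widening is safe (my illustration):

// Sketch: after zero-extension, full-width arithmetic on the value
// matches byte arithmetic on the loaded byte.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t temp2 = 0xDEADBEEFCAFE0042ULL;  // stale upper bits from earlier use
  uint8_t instance_type = 0xB7;            // the byte at Map::kInstanceTypeOffset
  temp2 = instance_type;                   // movzxbl: loads byte, zeroes bits 8..63
  assert(temp2 == 0xB7);                   // cmpq now sees only the byte
  assert(temp2 - 0x80 == 0x37);            // subq agrees with the byte-wide result
  return 0;
}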
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
var _d = new Date();
_d.setHours(0,0,0,0);
_d.setHours(0,0,0,0);
%OptimizeFunctionOnNextCall(_d.setHours);
_d.setHours(0,0,0,0);
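If I read the regression test right, the repeated setHours calls warm the function up so %OptimizeFunctionOnNextCall can tier it up, and the final call runs optimized code whose byte instructions previously landed on a register without a byte form; with the ASSERTs promoted to CHECKs, a bad encoding now aborts instead of silently clobbering a register. It should be runnable directly as: d8 --allow-natives-syntax regress-1945.js (the Flags comment names the flag the mjsunit harness passes).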