Commit d123f30b authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Support 31-bit Smis in lower half-word on 64-bit architectures.

This CL introduces a new GN argument, v8_enable_pointer_compression, which is
false by default. All the changes in this CL are guarded by this flag.

The upper half-word of a Smi must be properly sign-extended according to the
sign of the lower half-word, which contains the actual Smi value.
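
For illustration, a minimal standalone sketch (assumed constants, not code from
this CL) of the compressed encoding described above, assuming a 64-bit host
with kSmiTagSize == 1, kSmiShiftSize == 0 and kSmiValueSize == 31:

// Standalone sketch: the 31-bit Smi payload lives in bits 1..31 of the
// 64-bit word and the upper half-word is a sign extension of the payload.
#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;    // tag bit 0 == 0 marks a Smi
constexpr int kSmiShiftSize = 0;  // payload starts right above the tag
constexpr int kSmiValueSize = 31;

intptr_t TagSmi(int32_t value) {
  // Shifting the sign-extended 64-bit value keeps the upper half-word
  // consistent with the sign of the 31-bit payload.
  return static_cast<intptr_t>(value) << (kSmiTagSize + kSmiShiftSize);
}

int32_t UntagSmi(intptr_t tagged) {
  // Arithmetic right shift recovers the original signed value.
  return static_cast<int32_t>(tagged >> (kSmiTagSize + kSmiShiftSize));
}

int main() {
  assert(UntagSmi(TagSmi(-42)) == -42);
  assert((TagSmi(-42) >> 32) == -1);  // upper half-word: all ones
  assert((TagSmi(42) >> 32) == 0);    // upper half-word: all zeros
}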

Bug: v8:7703
Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: I2b52ab49cd18c7c613130705de445fef44c30ac5
Reviewed-on: https://chromium-review.googlesource.com/1061175
Reviewed-by: Yang Guo <yangguo@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53519}
parent 4ce2adc3
......@@ -80,6 +80,9 @@ declare_args() {
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
# Enable pointer compression (sets -dV8_COMPRESS_POINTERS).
v8_enable_pointer_compression = false
# Interpreted regexp engine exists as platform-independent alternative
# based where the regular expression is compiled to a bytecode.
v8_interpreted_regexp = false
......@@ -292,6 +295,9 @@ config("features") {
if (v8_enable_minor_mc) {
defines += [ "ENABLE_MINOR_MC" ]
}
if (v8_enable_pointer_compression) {
defines += [ "V8_COMPRESS_POINTERS" ]
}
if (v8_enable_object_print) {
defines += [ "OBJECT_PRINT" ]
}
......
......@@ -175,18 +175,18 @@ const int kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
template <size_t ptr_size>
template <size_t tagged_ptr_size>
struct SmiTagging;
template <int kSmiShiftSize>
V8_INLINE internal::Object* IntToSmi(int value) {
int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
uintptr_t tagged_value =
(static_cast<uintptr_t>(value) << smi_shift_bits) | kSmiTag;
intptr_t tagged_value =
(static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
return reinterpret_cast<internal::Object*>(tagged_value);
}
// Smi constants for 32-bit systems.
// Smi constants for systems where tagged pointer is a 32-bit value.
template <>
struct SmiTagging<4> {
enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
......@@ -216,7 +216,7 @@ struct SmiTagging<4> {
}
};
// Smi constants for 64-bit systems.
// Smi constants for systems where tagged pointer is a 64-bit value.
template <>
struct SmiTagging<8> {
enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
......@@ -236,7 +236,15 @@ struct SmiTagging<8> {
}
};
#if V8_COMPRESS_POINTERS
static_assert(
kApiPointerSize == kApiInt64Size,
"Pointer compression can be enabled only for 64-bit architectures");
typedef SmiTagging<4> PlatformSmiTagging;
#else
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
#endif
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
const int kSmiMinValue = (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
......
......@@ -386,18 +386,16 @@ unsigned Operand::shift_amount() const {
Operand Operand::UntagSmi(Register smi) {
STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
kSmiValueSize));
DCHECK(smi.Is64Bits());
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
return Operand(smi, ASR, kSmiShift);
}
Operand Operand::UntagSmiAndScale(Register smi, int scale) {
STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
kSmiValueSize));
DCHECK(smi.Is64Bits());
DCHECK((scale >= 0) && (scale <= (64 - kSmiValueSize)));
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
if (scale > kSmiShift) {
return Operand(smi, LSL, scale - kSmiShift);
} else if (scale < kSmiShift) {
......
......@@ -1034,40 +1034,43 @@ void TurboAssembler::InitializeRootRegister() {
void MacroAssembler::SmiTag(Register dst, Register src) {
STATIC_ASSERT(kXRegSizeInBits ==
static_cast<unsigned>(kSmiShift + kSmiValueSize));
DCHECK(dst.Is64Bits() && src.Is64Bits());
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
Lsl(dst, src, kSmiShift);
}
void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
void TurboAssembler::SmiUntag(Register dst, Register src) {
STATIC_ASSERT(kXRegSizeInBits ==
static_cast<unsigned>(kSmiShift + kSmiValueSize));
DCHECK(dst.Is64Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts) {
AssertSmi(src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
Asr(dst, src, kSmiShift);
}
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
STATIC_ASSERT(kXRegSizeInBits ==
static_cast<unsigned>(kSmiShift + kSmiValueSize));
DCHECK(dst.Is64Bits());
if (src.IsImmediateOffset() && src.shift_amount() == 0) {
if (FLAG_enable_slow_asserts) {
if (SmiValuesAre32Bits()) {
if (src.IsImmediateOffset() && src.shift_amount() == 0) {
if (FLAG_enable_slow_asserts) {
Ldr(dst, src);
AssertSmi(dst);
}
// Load value directly from the upper half-word.
// Assumes that Smis are shifted by 32 bits and little endianness.
DCHECK_EQ(kSmiShift, 32);
Ldrsw(dst,
MemOperand(src.base(), src.offset() + (kSmiShift / kBitsPerByte),
src.addrmode()));
} else {
Ldr(dst, src);
AssertSmi(dst);
SmiUntag(dst);
}
// Load value directly from the upper half-word.
// Assumes that Smis are shifted by 32 bits and little endianness.
DCHECK_EQ(kSmiShift, 32);
Ldrsw(dst, MemOperand(src.base(), src.offset() + (kSmiShift / kBitsPerByte),
src.addrmode()));
} else {
DCHECK(SmiValuesAre31Bits());
Ldr(dst, src);
SmiUntag(dst);
}
......
......@@ -3246,7 +3246,7 @@ void Simulator::Debug() {
current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & kSmiTagMask) == 0) {
STATIC_ASSERT(kSmiValueSize == 32);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
int32_t untagged = (value >> kSmiShift) & 0xFFFFFFFF;
PrintF("smi %" PRId32, untagged);
} else {
......
This diff is collapsed.
This diff is collapsed.
......@@ -479,6 +479,7 @@ class SloppyTNode : public TNode<T> {
V(Int32Add, Word32T, Word32T, Word32T) \
V(Int32AddWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \
V(Int32Sub, Word32T, Word32T, Word32T) \
V(Int32SubWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \
V(Int32Mul, Word32T, Word32T, Word32T) \
V(Int32MulWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T, Int32T) \
V(Int32Div, Int32T, Int32T, Int32T) \
......
......@@ -1038,14 +1038,16 @@ Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
__ Bind(&if_smi);
}
if (machine()->Is64()) {
if (SmiValuesAre32Bits()) {
Node* value_smi = ChangeInt32ToSmi(value32);
__ Goto(&done, value_smi);
} else {
DCHECK(SmiValuesAre31Bits());
Node* add = __ Int32AddWithOverflow(value32, value32);
Node* ovf = __ Projection(1, add);
__ GotoIf(ovf, &if_heapnumber);
Node* value_smi = __ Projection(0, add);
value_smi = ChangeInt32ToIntPtr(value_smi);
__ Goto(&done, value_smi);
}
}
......@@ -1089,9 +1091,10 @@ Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
Node* value = node->InputAt(0);
if (machine()->Is64()) {
if (SmiValuesAre32Bits()) {
return ChangeInt32ToSmi(value);
}
DCHECK(SmiValuesAre31Bits());
auto if_overflow = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kTagged);
......@@ -1099,7 +1102,9 @@ Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
Node* add = __ Int32AddWithOverflow(value, value);
Node* ovf = __ Projection(1, add);
__ GotoIf(ovf, &if_overflow);
__ Goto(&done, __ Projection(0, add));
Node* value_smi = __ Projection(0, add);
value_smi = ChangeInt32ToIntPtr(value_smi);
__ Goto(&done, value_smi);
__ Bind(&if_overflow);
Node* number = AllocateHeapNumberWithValue(__ ChangeInt32ToFloat64(value));
......@@ -1789,7 +1794,9 @@ Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* check = __ Projection(1, add);
__ DeoptimizeIf(DeoptimizeReason::kOverflow, params.feedback(), check,
frame_state);
return __ Projection(0, add);
Node* result = __ Projection(0, add);
result = ChangeInt32ToIntPtr(result);
return result;
}
Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
......
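Why the 31-bit path tags with Int32AddWithOverflow: adding a value to itself is
the same as shifting it left by one (the Smi tag shift when kSmiShiftSize is 0),
and the 32-bit addition overflows exactly when the value falls outside the
31-bit Smi range; the result then still has to be sign-extended to a full word,
which is what the inserted ChangeInt32ToIntPtr does. A standalone sketch of
that reasoning (assumed helper name, not compiler code):

#include <cassert>
#include <cstdint>

// Returns true on overflow (the value does not fit in a 31-bit Smi),
// otherwise stores the tagged, sign-extended word in *tagged.
bool TagWithOverflow(int32_t x, int64_t* tagged) {
  int64_t sum = static_cast<int64_t>(x) + x;            // x + x == x << 1
  if (sum < INT32_MIN || sum > INT32_MAX) return true;  // deopt / heap number
  *tagged = sum;  // sign extension to the full word (ChangeInt32ToIntPtr)
  return false;
}

int main() {
  int64_t t = 0;
  assert(!TagWithOverflow(0x3FFFFFFF, &t) && t == 0x7FFFFFFE);     // max Smi
  assert(!TagWithOverflow(-0x40000000, &t) && t == -0x80000000LL); // min Smi
  assert(TagWithOverflow(0x40000000, &t));  // one past max: overflows
}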
......@@ -2727,10 +2727,15 @@ bool CanCover(Node* value, IrOpcode::Value opcode) {
return true;
}
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
if (mcgraph()->machine()->Is64()) {
value = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), value);
}
return value;
}
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
value = BuildChangeInt32ToIntPtr(value);
return graph()->NewNode(mcgraph()->machine()->WordShl(), value,
BuildSmiShiftBitsConstant());
}
......@@ -4056,9 +4061,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
if (machine->Is64()) {
if (SmiValuesAre32Bits()) {
return BuildChangeInt32ToSmi(value);
}
DCHECK(SmiValuesAre31Bits());
Node* effect = *effect_;
Node* control = *control_;
......@@ -4076,6 +4082,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* if_false = graph()->NewNode(common->IfFalse(), branch);
Node* vfalse = graph()->NewNode(common->Projection(0), add, if_false);
vfalse = BuildChangeInt32ToIntPtr(vfalse);
Node* merge = graph()->NewNode(common->Merge(2), if_true, if_false);
Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
......@@ -4129,9 +4136,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// On 64-bit machines we can just wrap the 32-bit integer in a smi, for
// 32-bit machines we need to deal with potential overflow and fallback to
// boxing.
if (machine->Is64()) {
if (SmiValuesAre32Bits()) {
vsmi = BuildChangeInt32ToSmi(value32);
} else {
DCHECK(SmiValuesAre31Bits());
Node* smi_tag = graph()->NewNode(machine->Int32AddWithOverflow(), value32,
value32, if_smi);
......@@ -4145,6 +4153,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if_smi = graph()->NewNode(common->IfFalse(), branch_ovf);
vsmi = graph()->NewNode(common->Projection(0), smi_tag, if_smi);
vsmi = BuildChangeInt32ToIntPtr(vsmi);
}
// Allocate the box for the {value}.
......
......@@ -448,6 +448,7 @@ class WasmGraphBuilder {
MachineType result_type, wasm::TrapReason trap_zero,
wasm::WasmCodePosition position);
Node* BuildChangeInt32ToIntPtr(Node* value);
Node* BuildChangeInt32ToSmi(Node* value);
Node* BuildChangeUint31ToSmi(Node* value);
Node* BuildSmiShiftBitsConstant();
......
......@@ -405,8 +405,26 @@ inline std::ostream& operator<<(std::ostream& os,
UNREACHABLE();
}
static_assert(kSmiValueSize <= 32, "Unsupported Smi tagging scheme");
// Smi sign bit position must be 32-bit aligned so we can use sign extension
// instructions on 64-bit architectures without additional shifts.
static_assert((kSmiValueSize + kSmiShiftSize + kSmiTagSize) % 32 == 0,
"Unsupported Smi tagging scheme");
constexpr bool kIsSmiValueInUpper32Bits =
(kSmiValueSize + kSmiShiftSize + kSmiTagSize) == 64;
constexpr bool kIsSmiValueInLower32Bits =
(kSmiValueSize + kSmiShiftSize + kSmiTagSize) == 32;
static_assert(!SmiValuesAre32Bits() == SmiValuesAre31Bits(),
"Unsupported Smi tagging scheme");
static_assert(SmiValuesAre32Bits() == kIsSmiValueInUpper32Bits,
"Unsupported Smi tagging scheme");
static_assert(SmiValuesAre31Bits() == kIsSmiValueInLower32Bits,
"Unsupported Smi tagging scheme");
// Mask for the sign bit in a smi.
constexpr intptr_t kSmiSignMask = kIntptrSignBit;
constexpr intptr_t kSmiSignMask = static_cast<intptr_t>(
uintptr_t{1} << (kSmiValueSize + kSmiShiftSize + kSmiTagSize - 1));
constexpr int kObjectAlignmentBits = kPointerSizeLog2;
constexpr intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
......
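The new kSmiSignMask expression generalizes the old kIntptrSignBit constant to
both tagging schemes. A small standalone check of what it evaluates to
(assuming a 64-bit host; not part of the CL):

#include <cstdint>

constexpr intptr_t SmiSignMask(int value_size, int shift_size, int tag_size) {
  return static_cast<intptr_t>(uintptr_t{1}
                               << (value_size + shift_size + tag_size - 1));
}

// Smi value in the upper half-word (kSmiValueSize 32, kSmiShiftSize 31):
// the mask is the word's own sign bit, matching the old kIntptrSignBit.
static_assert(SmiSignMask(32, 31, 1) ==
                  static_cast<intptr_t>(uintptr_t{1} << 63),
              "upper-half-word scheme");

// 31-bit Smi in the lower half-word (kSmiValueSize 31, kSmiShiftSize 0):
// the mask is bit 31, the sign bit of the lower half-word.
static_assert(SmiSignMask(31, 0, 1) == (intptr_t{1} << 31),
              "lower-half-word scheme");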
......@@ -18,7 +18,7 @@ LayoutDescriptor* LayoutDescriptor::FromSmi(Smi* smi) {
Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
if (length <= kSmiValueSize) {
if (length <= kBitsInSmiLayout) {
// The whole bit vector fits into a smi.
return handle(LayoutDescriptor::FromSmi(Smi::kZero), isolate);
}
......@@ -130,7 +130,7 @@ bool LayoutDescriptor::IsSlowLayout() { return !IsSmi(); }
int LayoutDescriptor::capacity() {
return IsSlowLayout() ? (length() * kBitsPerByte) : kSmiValueSize;
return IsSlowLayout() ? (length() * kBitsPerByte) : kBitsInSmiLayout;
}
......@@ -161,10 +161,10 @@ int LayoutDescriptor::CalculateCapacity(Map* map, DescriptorArray* descriptors,
int layout_descriptor_length;
const int kMaxWordsPerField = kDoubleSize / kPointerSize;
if (num_descriptors <= kSmiValueSize / kMaxWordsPerField) {
if (num_descriptors <= kBitsInSmiLayout / kMaxWordsPerField) {
// Even in the "worst" case (all fields are doubles) it would fit into
// a Smi, so no need to calculate length.
layout_descriptor_length = kSmiValueSize;
layout_descriptor_length = kBitsInSmiLayout;
} else {
layout_descriptor_length = 0;
......
......@@ -142,13 +142,13 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
bool is_tagged = (value & layout_mask) == 0;
if (!is_tagged) value = ~value; // Count set bits instead of cleared bits.
value = value & ~(layout_mask - 1); // Clear bits we are not interested in.
int sequence_length =
base::bits::CountTrailingZeros(value) - layout_bit_index;
int sequence_length;
if (IsSlowLayout()) {
sequence_length = base::bits::CountTrailingZeros(value) - layout_bit_index;
if (layout_bit_index + sequence_length == kBitsPerLayoutWord) {
// This is a contiguous sequence till the end of current word, proceed
// counting in the subsequent words.
if (IsSlowLayout()) {
if (layout_bit_index + sequence_length == kBitsPerLayoutWord) {
// This is a contiguous sequence till the end of current word, proceed
// counting in the subsequent words.
++layout_word_index;
int num_words = number_of_layout_words();
for (; layout_word_index < num_words; layout_word_index++) {
......@@ -161,7 +161,17 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
if (sequence_length >= max_sequence_length) break;
if (cur_sequence_length != kBitsPerLayoutWord) break;
}
if (is_tagged && (field_index + sequence_length == capacity())) {
// The contiguous sequence of tagged fields lasts till the end of the
// layout descriptor which means that all the fields starting from
// field_index are tagged.
sequence_length = std::numeric_limits<int>::max();
}
}
} else { // Fast layout.
sequence_length = Min(base::bits::CountTrailingZeros(value),
static_cast<unsigned>(kBitsInSmiLayout)) -
layout_bit_index;
if (is_tagged && (field_index + sequence_length == capacity())) {
// The contiguous sequence of tagged fields lasts till the end of the
// layout descriptor which means that all the fields starting from
......
......@@ -96,6 +96,10 @@ class LayoutDescriptor : public ByteArray {
LayoutDescriptor* SetTaggedForTesting(int field_index, bool tagged);
private:
// Exclude sign-bit to simplify encoding.
static constexpr int kBitsInSmiLayout =
SmiValuesAre32Bits() ? 32 : kSmiValueSize - 1;
static const int kBitsPerLayoutWord = 32;
int number_of_layout_words() { return length() / kUInt32Size; }
uint32_t get_layout_word(int index) const { return get_uint32(index); }
......
......@@ -5337,6 +5337,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
if (SmiValuesAre32Bits()) {
Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
} else {
DCHECK(SmiValuesAre31Bits());
Lw(dst, src);
SmiUntag(dst);
}
......
......@@ -499,10 +499,10 @@ class TurboAssembler : public Assembler {
void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
if (SmiValuesAre32Bits()) {
STATIC_ASSERT(kSmiShift == 32);
dsra32(dst, src, 0);
dsra32(dst, src, kSmiShift - 32);
} else {
sra(dst, src, kSmiTagSize);
DCHECK(SmiValuesAre31Bits());
sra(dst, src, kSmiShift);
}
}
......@@ -1166,9 +1166,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void SmiTag(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (SmiValuesAre32Bits()) {
STATIC_ASSERT(kSmiShift == 32);
dsll32(dst, src, 0);
} else {
DCHECK(SmiValuesAre31Bits());
Addu(dst, src, src);
}
}
......@@ -1183,6 +1183,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// The int portion is upper 32-bits of 64-bit word.
dsra(dst, src, kSmiShift - scale);
} else {
DCHECK(SmiValuesAre31Bits());
DCHECK_GE(scale, kSmiTagSize);
sll(dst, src, scale - kSmiTagSize);
}
......
......@@ -567,9 +567,9 @@ int SearchString(Isolate* isolate,
// and pattern as vectors before calling SearchString. Used from the
// StringIndexOf builtin.
template <typename SubjectChar, typename PatternChar>
int SearchStringRaw(Isolate* isolate, const SubjectChar* subject_ptr,
int subject_length, const PatternChar* pattern_ptr,
int pattern_length, int start_index) {
intptr_t SearchStringRaw(Isolate* isolate, const SubjectChar* subject_ptr,
int subject_length, const PatternChar* pattern_ptr,
int pattern_length, int start_index) {
DisallowHeapAllocation no_gc;
Vector<const SubjectChar> subject(subject_ptr, subject_length);
Vector<const PatternChar> pattern(pattern_ptr, pattern_length);
......
......@@ -248,6 +248,10 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return false;
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
UNREACHABLE();
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
......
......@@ -619,6 +619,10 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
Sxtw(dst, src);
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
......
......@@ -860,6 +860,10 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::ShrPair_cl, pinned);
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
UNREACHABLE();
}
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(AVX)) {
......
......@@ -441,6 +441,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount, LiftoffRegList pinned = {});
inline void emit_i32_to_intptr(Register dst, Register src);
inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
if (kPointerSize == 8) {
emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
......
......@@ -1597,32 +1597,33 @@ class LiftoffCompiler {
void Int32ToSmi(LiftoffRegister dst, Register src, Register scratch) {
constexpr int kTotalSmiShift = kSmiTagSize + kSmiShiftSize;
// TODO(clemensh): Shift by immediate directly.
if (kPointerSize == 4) {
__ LoadConstant(LiftoffRegister(scratch),
WasmValue(int32_t{kTotalSmiShift}));
__ emit_i32_shl(dst.gp(), src, scratch);
} else {
if (SmiValuesAre32Bits()) {
__ LoadConstant(LiftoffRegister(scratch),
WasmValue(int64_t{kTotalSmiShift}));
__ emit_i64_shl(dst, LiftoffRegister(src), scratch);
} else {
DCHECK(SmiValuesAre31Bits());
__ LoadConstant(LiftoffRegister(scratch),
WasmValue(int32_t{kTotalSmiShift}));
__ emit_i32_shl(dst.gp(), src, scratch);
if (kPointerSize == kInt64Size) {
__ emit_i32_to_intptr(dst.gp(), dst.gp());
}
}
}
void SmiToInt32(Register dst, LiftoffRegister src, Register scratch) {
constexpr int kTotalSmiShift = kSmiTagSize + kSmiShiftSize;
// TODO(clemensh): Shift by immediate directly.
if (kPointerSize == 4) {
__ LoadConstant(LiftoffRegister(scratch),
WasmValue(int32_t{kTotalSmiShift}));
__ emit_i32_sar(dst, src.gp(), scratch);
} else {
// Assert that we shift by exactly 32 bit. This makes the returned value a
// zero-extended 32-bit value without emitting further instructions.
static_assert(kPointerSize == 4 || kTotalSmiShift == 32,
"shift by exactly 32 bit");
if (SmiValuesAre32Bits()) {
__ LoadConstant(LiftoffRegister(scratch),
WasmValue(int64_t{kTotalSmiShift}));
__ emit_i64_shr(LiftoffRegister(dst), src, scratch);
} else {
DCHECK(SmiValuesAre31Bits());
__ LoadConstant(LiftoffRegister(scratch),
WasmValue(int32_t{kTotalSmiShift}));
__ emit_i32_sar(dst, src.gp(), scratch);
}
}
......
......@@ -743,6 +743,10 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&TurboAssembler::ShrPair, pinned);
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
UNREACHABLE();
}
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
TurboAssembler::Neg_s(dst, src);
}
......
......@@ -633,6 +633,10 @@ I64_SHIFTOP(shr, dsrlv)
#undef I64_SHIFTOP
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
addu(dst, src, zero_reg);
}
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
TurboAssembler::Neg_s(dst, src);
}
......
......@@ -257,6 +257,10 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
UNREACHABLE();
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
......
......@@ -257,6 +257,10 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
return true;
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
UNREACHABLE();
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
......
......@@ -755,6 +755,10 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
&Assembler::shrq_cl, pinned);
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
movsxlq(dst, src);
}
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(AVX)) {
......
......@@ -1103,6 +1103,7 @@ void MacroAssembler::SmiTag(Register dst, Register src) {
if (dst != src) {
movp(dst, src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
shlp(dst, Immediate(kSmiShift));
}
......@@ -1111,6 +1112,7 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
if (dst != src) {
movp(dst, src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
sarp(dst, Immediate(kSmiShift));
}
......@@ -1218,7 +1220,16 @@ void MacroAssembler::SmiAddConstant(Operand dst, Smi* constant) {
Immediate(constant->value()));
} else {
DCHECK(SmiValuesAre31Bits());
addp(dst, Immediate(constant));
if (kPointerSize == kInt64Size) {
// Sign-extend value after addition
movl(kScratchRegister, dst);
addl(kScratchRegister, Immediate(constant));
movsxlq(kScratchRegister, kScratchRegister);
movq(dst, kScratchRegister);
} else {
DCHECK_EQ(kSmiShiftSize, 32);
addp(dst, Immediate(constant));
}
}
}
}
......@@ -1241,18 +1252,21 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
return SmiIndex(dst, times_1);
} else {
DCHECK(SmiValuesAre31Bits());
DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
if (dst != src) {
movp(dst, src);
}
// We have to sign extend the index register to 64-bit as the SMI might
// be negative.
movsxlq(dst, dst);
if (shift == times_1) {
sarq(dst, Immediate(kSmiShift));
return SmiIndex(dst, times_1);
if (shift < kSmiShift) {
sarq(dst, Immediate(kSmiShift - shift));
} else if (shift != kSmiShift) {
if (shift - kSmiShift <= static_cast<int>(times_8)) {
return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiShift));
}
shlq(dst, Immediate(shift - kSmiShift));
}
return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
return SmiIndex(dst, times_1);
}
}
......
......@@ -85,10 +85,15 @@ static void construct_call(const v8::FunctionCallbackInfo<v8::Value>& args) {
.FromJust();
#elif defined(V8_HOST_ARCH_64_BIT)
Address fp = calling_frame->fp();
int32_t low_bits = static_cast<int32_t>(fp & 0xFFFFFFFF);
int32_t high_bits = static_cast<int32_t>(fp >> 32);
args.This()->Set(context, v8_str("low_bits"), v8_num(low_bits)).FromJust();
args.This()->Set(context, v8_str("high_bits"), v8_num(high_bits)).FromJust();
uint64_t kSmiValueMask =
(static_cast<uintptr_t>(1) << (kSmiValueSize - 1)) - 1;
int32_t low_bits = static_cast<int32_t>(fp & kSmiValueMask);
fp >>= kSmiValueSize - 1;
int32_t high_bits = static_cast<int32_t>(fp & kSmiValueMask);
fp >>= kSmiValueSize - 1;
CHECK_EQ(fp, 0); // Ensure all the bits are successfully encoded.
args.This()->Set(context, v8_str("low_bits"), v8_int(low_bits)).FromJust();
args.This()->Set(context, v8_str("high_bits"), v8_int(high_bits)).FromJust();
#else
#error Host architecture is neither 32-bit nor 64-bit.
#endif
......
......@@ -186,7 +186,8 @@ TEST(SmiCompare) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
size_t allocated;
byte* buffer = AllocateAssemblerBuffer(&allocated);
byte* buffer =
AllocateAssemblerBuffer(&allocated, 2 * Assembler::kMinimalBufferSize);
MacroAssembler assembler(isolate, buffer, static_cast<int>(allocated),
v8::internal::CodeObjectRequired::kYes);
......
This diff is collapsed.
......@@ -78,9 +78,14 @@ Address TraceExtension::GetFP(const v8::FunctionCallbackInfo<v8::Value>& args) {
#if defined(V8_HOST_ARCH_32_BIT)
Address fp = *reinterpret_cast<Address*>(*args[0]);
#elif defined(V8_HOST_ARCH_64_BIT)
int64_t low_bits = *reinterpret_cast<uint64_t*>(*args[0]) >> 32;
int64_t high_bits = *reinterpret_cast<uint64_t*>(*args[1]);
Address fp = static_cast<Address>(high_bits | low_bits);
uint64_t kSmiValueMask =
(static_cast<uintptr_t>(1) << (kSmiValueSize - 1)) - 1;
uint64_t low_bits =
(*reinterpret_cast<Smi**>(*args[0]))->value() & kSmiValueMask;
uint64_t high_bits =
(*reinterpret_cast<Smi**>(*args[1]))->value() & kSmiValueMask;
Address fp =
static_cast<Address>((high_bits << (kSmiValueSize - 1)) | low_bits);
#else
#error Host architecture is neither 32-bit nor 64-bit.
#endif
......
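The changed test encodes a 64-bit frame pointer as two Smi-sized chunks so it
survives the narrower Smi range under pointer compression, and
TraceExtension::GetFP above reassembles it. A standalone round-trip sketch of
that encoding (assumed kSmiValueSize of 31 and a hypothetical fp value):

#include <cassert>
#include <cstdint>

constexpr int kSmiValueSize = 31;  // assumed: pointer compression enabled
constexpr uint64_t kSmiValueMask =
    (static_cast<uint64_t>(1) << (kSmiValueSize - 1)) - 1;

int main() {
  uint64_t fp = 0x00007ffdeadbeef0;  // hypothetical frame pointer
  // Encode as in construct_call(): low chunk first, then the high bits.
  uint64_t low_bits = fp & kSmiValueMask;
  uint64_t high_bits = (fp >> (kSmiValueSize - 1)) & kSmiValueMask;
  assert((fp >> (2 * (kSmiValueSize - 1))) == 0);  // everything must fit
  // Decode as in TraceExtension::GetFP().
  uint64_t decoded = (high_bits << (kSmiValueSize - 1)) | low_bits;
  assert(decoded == fp);
}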