Commit db61168a authored by Francis McCabe, committed by Commit Bot

Revert "[torque] Allow storing to bitfield structs that are stored in Smis"

This reverts commit 80843eda.

Reason for revert: Causes compilation failure on macs

https://ci.chromium.org/p/v8/builders/ci/Mac%20V8%20FYI%20Release%20(Intel)/8934?


Original change's description:
> [torque] Allow storing to bitfield structs that are stored in Smis
> 
> This change:
> 1. Updates the Torque compiler to allow direct access to bitfields that
>    are packed within Smi values, which previously would have required a
>    separate untagging step,
> 2. Updates JSRegExpStringIterator to represent its flags in Torque,
> 3. Adds reduction cases in MachineOperatorReducer for when the input to
>    a branch or the left-hand side of a Word32Equals is based on a 64-bit
>    shift-and-mask operation which has been truncated to 32 bits, as is
>    the case in the code generated by step 1, and
> 4. Adds a reduction case in MachineOperatorReducer to remove an extra
>    Word64And operation added by step 1.
> 
> Bug: v8:7793
> Change-Id: Ib4ac2def6211b3cae6be25a8b2a644be5c7d6d3f
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2119225
> Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
> Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#67290}

TBR=tebbi@chromium.org,seth.brenith@microsoft.com,nicohartmann@chromium.org

Change-Id: Ifa683c92631291c9437438682b6efb2e12862682
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:7793
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2159730
Reviewed-by: Francis McCabe <fgm@chromium.org>
Commit-Queue: Francis McCabe <fgm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67291}
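
For context, item 3 of the original change's description relies on the identity ((x >> K1) & K2) == K3  <=>  (x & (K2 << K1)) == (K3 << K1), which the reverted reducer cases implement. The following standalone C++ sketch is an editor-added illustration, not part of the CL; the function names and constants are made up for the example. It checks the identity for a case where neither the mask nor the compared constant loses bits when shifted left:

// Standalone sketch (not from this CL): checks the identity
//   ((x >> K1) & K2) == K3   <=>   (x & (K2 << K1)) == (K3 << K1)
// which holds as long as neither K2 nor K3 loses bits when shifted left by K1.
#include <cassert>
#include <cstdint>

static bool ShiftThenMask(uint32_t x, uint32_t k1, uint32_t k2, uint32_t k3) {
  return ((x >> k1) & k2) == k3;
}

static bool MaskThenCompare(uint32_t x, uint32_t k1, uint32_t k2, uint32_t k3) {
  return (x & (k2 << k1)) == (k3 << k1);
}

int main() {
  // Illustrative values: a 3-bit field stored at bit offset 1 (e.g. flags
  // sitting behind a one-bit Smi tag), expected field value 5.
  const uint32_t k1 = 1, k2 = 0x7, k3 = 0x5;
  for (uint32_t x = 0; x < 0x1000; ++x) {
    assert(ShiftThenMask(x, k1, k2, k3) == MaskThenCompare(x, k1, k2, k3));
  }
  return 0;
}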
parent 80843eda
@@ -1336,10 +1336,10 @@ TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
   // 9. Set iterator.[[Done]] to false.
   TNode<Int32T> global_flag =
       Word32Shl(ReinterpretCast<Int32T>(global),
-                Int32Constant(JSRegExpStringIterator::GlobalBit::kShift));
+                Int32Constant(JSRegExpStringIterator::kGlobalBit));
   TNode<Int32T> unicode_flag =
       Word32Shl(ReinterpretCast<Int32T>(full_unicode),
-                Int32Constant(JSRegExpStringIterator::UnicodeBit::kShift));
+                Int32Constant(JSRegExpStringIterator::kUnicodeBit));
   TNode<Int32T> iterator_flags = Word32Or(global_flag, unicode_flag);
   StoreObjectFieldNoWriteBarrier(iterator, JSRegExpStringIterator::kFlagsOffset,
                                  SmiFromInt32(iterator_flags));
......
@@ -12,7 +12,7 @@ namespace internal_coverage {
     const debugInfo = Cast<DebugInfo>(shared.script_or_debug_info)
         otherwise goto IfNoCoverageInfo;
-    if (!debugInfo.flags.has_coverage_info) goto IfNoCoverageInfo;
+    if (!SmiUntag(debugInfo.flags).has_coverage_info) goto IfNoCoverageInfo;
     return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
   }
......
@@ -104,6 +104,32 @@ namespace regexp {
     return RegExpPrototypeMatchAllImpl(context, receiver, string);
   }

+  const kJSRegExpStringIteratorDone:
+      constexpr int31 generates '1 << JSRegExpStringIterator::kDoneBit';
+  const kJSRegExpStringIteratorGlobal: constexpr int31
+      generates '1 << JSRegExpStringIterator::kGlobalBit';
+  const kJSRegExpStringIteratorUnicode: constexpr int31
+      generates '1 << JSRegExpStringIterator::kUnicodeBit';
+
+  extern macro IsSetSmi(Smi, constexpr int31): bool;
+
+  macro HasDoneFlag(flags: Smi): bool {
+    return IsSetSmi(flags, kJSRegExpStringIteratorDone);
+  }
+
+  macro HasGlobalFlag(flags: Smi): bool {
+    return IsSetSmi(flags, kJSRegExpStringIteratorGlobal);
+  }
+
+  macro HasUnicodeFlag(flags: Smi): bool {
+    return IsSetSmi(flags, kJSRegExpStringIteratorUnicode);
+  }
+
+  macro SetDoneFlag(iterator: JSRegExpStringIterator, flags: Smi) {
+    const newFlags: Smi = flags | kJSRegExpStringIteratorDone;
+    iterator.flags = newFlags;
+  }
+
   // https://tc39.github.io/proposal-string-matchall/
   // %RegExpStringIteratorPrototype%.next ( )
   transitioning javascript builtin RegExpStringIteratorPrototypeNext(
@@ -121,8 +147,8 @@ namespace regexp {
     try {
       // 4. If O.[[Done]] is true, then
       //   a. Return ! CreateIterResultObject(undefined, true).
-      const flags: SmiTagged<JSRegExpStringIteratorFlags> = receiver.flags;
-      if (flags.done) goto ReturnEmptyDoneResult;
+      const flags: Smi = receiver.flags;
+      if (HasDoneFlag(flags)) goto ReturnEmptyDoneResult;

       // 5. Let R be O.[[iteratingRegExp]].
       const iteratingRegExp: JSReceiver = receiver.iterating_reg_exp;
@@ -154,15 +180,15 @@ namespace regexp {
       }
       // 11. Else,
       //   b. Else, handle non-global case first.
-      if (!HasGlobalFlag(flags)) {
+      if (!HasGlobalFlag(flags)) {
         // i. Set O.[[Done]] to true.
-        receiver.flags.done = true;
+        SetDoneFlag(receiver, flags);
         // ii. Return ! CreateIterResultObject(match, false).
         return AllocateJSIteratorResult(UnsafeCast<JSAny>(match), False);
       }

       // a. If global is true,
-      assert(flags.global);
+      assert(HasGlobalFlag(flags));
       if (isFastRegExp) {
         // i. Let matchStr be ? ToString(? Get(match, "0")).
         const match = UnsafeCast<JSRegExpResult>(match);
@@ -180,7 +206,7 @@ namespace regexp {
         // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex,
         //    fullUnicode).
         const nextIndex: Smi = AdvanceStringIndexFast(
-            iteratingString, thisIndex, flags.unicode);
+            iteratingString, thisIndex, HasUnicodeFlag(flags));
         // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
         FastStoreLastIndex(iteratingRegExp, nextIndex);
@@ -201,8 +227,8 @@ namespace regexp {
         // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex,
         //    fullUnicode).
-        const nextIndex: Number =
-            AdvanceStringIndexSlow(iteratingString, thisIndex, flags.unicode);
+        const nextIndex: Number = AdvanceStringIndexSlow(
+            iteratingString, thisIndex, HasUnicodeFlag(flags));
         // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
         SlowStoreLastIndex(iteratingRegExp, nextIndex);
@@ -213,7 +239,7 @@ namespace regexp {
     // 10. If match is null, then
     label IfNoMatch {
       // a. Set O.[[Done]] to true.
-      receiver.flags.done = true;
+      SetDoneFlag(receiver, flags);
       // b. Return ! CreateIterResultObject(undefined, true).
       goto ReturnEmptyDoneResult;
......
@@ -7348,7 +7348,7 @@ TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
 }

 TNode<UintPtrT> CodeStubAssembler::DecodeWord(SloppyTNode<WordT> word,
-                                              uint32_t shift, uintptr_t mask) {
+                                              uint32_t shift, uint32_t mask) {
   DCHECK_EQ((mask >> shift) << shift, mask);
   return Unsigned(WordAnd(WordShr(word, static_cast<int>(shift)),
                           IntPtrConstant(mask >> shift)));
@@ -7367,7 +7367,7 @@ TNode<Word32T> CodeStubAssembler::UpdateWord32(TNode<Word32T> word,
 TNode<WordT> CodeStubAssembler::UpdateWord(TNode<WordT> word,
                                            TNode<UintPtrT> value,
-                                           uint32_t shift, uintptr_t mask) {
+                                           uint32_t shift, uint32_t mask) {
   DCHECK_EQ((mask >> shift) << shift, mask);
   // Ensure the {value} fits fully in the mask.
   CSA_ASSERT(this,
......
@@ -2814,7 +2814,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Decodes an unsigned (!) value from |word| to a word-size node.
   TNode<UintPtrT> DecodeWord(SloppyTNode<WordT> word, uint32_t shift,
-                             uintptr_t mask);
+                             uint32_t mask);

   // Returns a node that contains the updated values of a |BitField|.
   template <typename BitField>
@@ -2850,7 +2850,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Returns a node that contains the updated {value} inside {word} starting
   // at {shift} and fitting in {mask}.
   TNode<WordT> UpdateWord(TNode<WordT> word, TNode<UintPtrT> value,
-                          uint32_t shift, uintptr_t mask);
+                          uint32_t shift, uint32_t mask);

   // Returns true if any of the |T|'s bits in given |word32| are set.
   template <typename T>
......
@@ -43,14 +43,6 @@ class Word32Adapter {
     return x.IsWord32Shl();
   }
   template <typename T>
-  static bool IsWordNShr(const T& x) {
-    return x.IsWord32Shr();
-  }
-  template <typename T>
-  static bool IsWordNSar(const T& x) {
-    return x.IsWord32Sar();
-  }
-  template <typename T>
   static bool IsWordNXor(const T& x) {
     return x.IsWord32Xor();
   }
@@ -73,7 +65,6 @@ class Word32Adapter {
   Reduction TryMatchWordNRor(Node* node) { return r_->TryMatchWord32Ror(node); }

   Node* IntNConstant(int32_t value) { return r_->Int32Constant(value); }
-  Node* UintNConstant(uint32_t value) { return r_->Uint32Constant(value); }
   Node* WordNAnd(Node* lhs, Node* rhs) { return r_->Word32And(lhs, rhs); }

  private:
@@ -103,14 +94,6 @@ class Word64Adapter {
     return x.IsWord64Shl();
   }
   template <typename T>
-  static bool IsWordNShr(const T& x) {
-    return x.IsWord64Shr();
-  }
-  template <typename T>
-  static bool IsWordNSar(const T& x) {
-    return x.IsWord64Sar();
-  }
-  template <typename T>
   static bool IsWordNXor(const T& x) {
     return x.IsWord64Xor();
   }
@@ -136,7 +119,6 @@ class Word64Adapter {
   }

   Node* IntNConstant(int64_t value) { return r_->Int64Constant(value); }
-  Node* UintNConstant(uint64_t value) { return r_->Uint64Constant(value); }
   Node* WordNAnd(Node* lhs, Node* rhs) { return r_->Word64And(lhs, rhs); }

  private:
@@ -264,12 +246,6 @@ Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
   return quotient;
 }

-Node* MachineOperatorReducer::TruncateInt64ToInt32(Node* value) {
-  Node* const node = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
-  Reduction const reduction = ReduceTruncateInt64ToInt32(node);
-  return reduction.Changed() ? reduction.replacement() : node;
-}
-
 // Perform constant folding and strength reduction on machine operators.
 Reduction MachineOperatorReducer::Reduce(Node* node) {
   switch (node->opcode()) {
@@ -321,20 +297,25 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       }
       // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x == x => true
-      if (m.right().HasValue()) {
-        base::Optional<std::pair<Node*, uint32_t>> replacements;
-        if (m.left().IsTruncateInt64ToInt32()) {
-          replacements = ReduceWord32EqualForConstantRhs<Word64Adapter>(
-              NodeProperties::GetValueInput(m.left().node(), 0),
-              static_cast<uint32_t>(m.right().Value()));
-        } else {
-          replacements = ReduceWord32EqualForConstantRhs<Word32Adapter>(
-              m.left().node(), static_cast<uint32_t>(m.right().Value()));
-        }
-        if (replacements) {
-          node->ReplaceInput(0, replacements->first);
-          node->ReplaceInput(1, Uint32Constant(replacements->second));
-          return Changed(node);
+      if (m.left().IsWord32And() && m.right().HasValue()) {
+        Uint32BinopMatcher mand(m.left().node());
+        if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
+            mand.right().HasValue()) {
+          Uint32BinopMatcher mshift(mand.left().node());
+          // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
+          if (mshift.right().HasValue()) {
+            auto shift_bits = mshift.right().Value();
+            auto mask = mand.right().Value();
+            auto rhs = static_cast<uint32_t>(m.right().Value());
+            // Make sure that we won't shift data off the end.
+            if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
+                shift_bits <= base::bits::CountLeadingZeros(rhs)) {
+              node->ReplaceInput(
+                  0, Word32And(mshift.left().node(), mask << shift_bits));
+              node->ReplaceInput(1, Int32Constant(rhs << shift_bits));
+              return Changed(node);
+            }
+          }
         }
       }
       break;
@@ -822,8 +803,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
       return NoChange();
     }
-    case IrOpcode::kTruncateInt64ToInt32:
-      return ReduceTruncateInt64ToInt32(node);
+    case IrOpcode::kTruncateInt64ToInt32: {
+      Int64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+      if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
     case IrOpcode::kTruncateFloat64ToFloat32: {
       Float64Matcher m(node->InputAt(0));
       if (m.HasValue()) {
@@ -879,13 +864,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
   return NoChange();
 }

-Reduction MachineOperatorReducer::ReduceTruncateInt64ToInt32(Node* node) {
-  Int64Matcher m(node->InputAt(0));
-  if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
-  if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
-  return NoChange();
-}
-
 Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
   DCHECK_EQ(IrOpcode::kInt32Add, node->opcode());
   Int32BinopMatcher m(node);
@@ -1551,20 +1529,6 @@ Reduction MachineOperatorReducer::ReduceWordNOr(Node* node) {
   }
   if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x

-  // (x & K1) | K2 => x | K2 if K2 has ones for every zero bit in K1.
-  // This case can be constructed by UpdateWord and UpdateWord32 in CSA.
-  if (m.right().HasValue()) {
-    if (A::IsWordNAnd(m.left())) {
-      typename A::IntNBinopMatcher mand(m.left().node());
-      if (mand.right().HasValue()) {
-        if ((m.right().Value() | mand.right().Value()) == -1) {
-          node->ReplaceInput(0, mand.left().node());
-          return Changed(node);
-        }
-      }
-    }
-  }
-
   return a.TryMatchWordNRor(node);
 }
@@ -1718,64 +1682,25 @@ Reduction MachineOperatorReducer::ReduceConditional(Node* node) {
   // Reductions involving control flow happen elsewhere. Non-zero inputs are
   // considered true in all conditional ops.
   NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
-  if (condition.IsTruncateInt64ToInt32()) {
-    if (auto replacement =
-            ReduceConditionalN<Word64Adapter>(condition.node())) {
-      NodeProperties::ReplaceValueInput(node, *replacement, 0);
-      return Changed(node);
-    }
-  } else if (auto replacement = ReduceConditionalN<Word32Adapter>(node)) {
-    NodeProperties::ReplaceValueInput(node, *replacement, 0);
-    return Changed(node);
-  }
-  return NoChange();
-}
-
-template <typename WordNAdapter>
-base::Optional<Node*> MachineOperatorReducer::ReduceConditionalN(Node* node) {
-  NodeMatcher condition(NodeProperties::GetValueInput(node, 0));
-  // Branch conditions are 32-bit comparisons against zero, so they are the
-  // opposite of a 32-bit `x == 0` node. To avoid repetition, we can reuse logic
-  // for Word32Equal: if `x == 0` can reduce to `y == 0`, then branch(x) can
-  // reduce to branch(y).
-  auto replacements =
-      ReduceWord32EqualForConstantRhs<WordNAdapter>(condition.node(), 0);
-  if (replacements && replacements->second == 0) return replacements->first;
-  return {};
-}
-
-template <typename WordNAdapter>
-base::Optional<std::pair<Node*, uint32_t>>
-MachineOperatorReducer::ReduceWord32EqualForConstantRhs(Node* lhs,
-                                                        uint32_t rhs) {
-  if (WordNAdapter::IsWordNAnd(NodeMatcher(lhs))) {
-    typename WordNAdapter::UintNBinopMatcher mand(lhs);
-    if ((WordNAdapter::IsWordNShr(mand.left()) ||
-         WordNAdapter::IsWordNSar(mand.left())) &&
-        mand.right().HasValue()) {
-      typename WordNAdapter::UintNBinopMatcher mshift(mand.left().node());
-      // ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
-      if (mshift.right().HasValue()) {
-        auto shift_bits = mshift.right().Value();
-        auto mask = mand.right().Value();
-        // Make sure that we won't shift data off the end, and that all of the
-        // data ends up in the lower 32 bits for 64-bit mode.
-        if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
-            shift_bits <= base::bits::CountLeadingZeros(rhs) &&
-            mask << shift_bits <= std::numeric_limits<uint32_t>::max()) {
-          Node* new_input = mshift.left().node();
-          uint32_t new_mask = static_cast<uint32_t>(mask << shift_bits);
-          uint32_t new_rhs = rhs << shift_bits;
-          if (WordNAdapter::WORD_SIZE == 64) {
-            // We can truncate before performing the And.
-            new_input = TruncateInt64ToInt32(new_input);
-          }
-          return std::make_pair(Word32And(new_input, new_mask), new_rhs);
-        }
-      }
-    }
-  }
-  return {};
+  if (condition.IsWord32And()) {
+    Uint32BinopMatcher mand(condition.node());
+    if ((mand.left().IsWord32Shr() || mand.left().IsWord32Sar()) &&
+        mand.right().HasValue()) {
+      Uint32BinopMatcher mshift(mand.left().node());
+      // Branch condition (x >> K1) & K2 => x & (K2 << K1)
+      if (mshift.right().HasValue()) {
+        auto shift_bits = mshift.right().Value();
+        auto mask = mand.right().Value();
+        // Make sure that we won't shift data off the end.
+        if (shift_bits <= base::bits::CountLeadingZeros(mask)) {
+          NodeProperties::ReplaceValueInput(
+              node, Word32And(mshift.left().node(), mask << shift_bits), 0);
+          return Changed(node);
+        }
+      }
+    }
+  }
+  return NoChange();
 }

 CommonOperatorBuilder* MachineOperatorReducer::common() const {
......
@@ -62,7 +62,6 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
   Node* Int32Mul(Node* lhs, Node* rhs);
   Node* Int32Div(Node* dividend, int32_t divisor);
   Node* Uint32Div(Node* dividend, uint32_t divisor);
-  Node* TruncateInt64ToInt32(Node* value);

   Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
   Reduction ReplaceFloat32(volatile float value) {
@@ -110,7 +109,6 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
   Reduction ReduceFloat64InsertHighWord32(Node* node);
   Reduction ReduceFloat64Compare(Node* node);
   Reduction ReduceFloat64RoundDown(Node* node);
-  Reduction ReduceTruncateInt64ToInt32(Node* node);
   Reduction ReduceConditional(Node* node);

   Graph* graph() const;
@@ -127,18 +125,6 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
   template <typename WordNAdapter>
   Reduction ReduceWordNXor(Node* node);

-  // Helper for ReduceConditional. Does not perform the actual reduction; just
-  // returns a new Node that could be used as the input to the condition.
-  template <typename WordNAdapter>
-  base::Optional<Node*> ReduceConditionalN(Node* node);
-
-  // Helper for finding a reduced equality condition. Does not perform the
-  // actual reduction; just returns a new pair that could be compared for the
-  // same outcome.
-  template <typename WordNAdapter>
-  base::Optional<std::pair<Node*, uint32_t>> ReduceWord32EqualForConstantRhs(
-      Node* lhs, uint32_t rhs);
-
   MachineGraph* mcgraph_;
   bool allow_signalling_nan_;
 };
......
@@ -17,9 +17,9 @@ namespace internal {
 TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpStringIterator)

-BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, DoneBit::kShift)
-BOOL_ACCESSORS(JSRegExpStringIterator, flags, global, GlobalBit::kShift)
-BOOL_ACCESSORS(JSRegExpStringIterator, flags, unicode, UnicodeBit::kShift)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, kDoneBit)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, global, kGlobalBit)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, unicode, kUnicodeBit)

 }  // namespace internal
 }  // namespace v8
......
@@ -6,7 +6,6 @@
 #define V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_H_

 #include "src/objects/js-objects.h"
-#include "torque-generated/bit-fields-tq.h"

 // Has to be the last include (doesn't have include guards):
 #include "src/objects/object-macros.h"
@@ -29,7 +28,9 @@ class JSRegExpStringIterator
   DECL_PRINTER(JSRegExpStringIterator)

-  DEFINE_TORQUE_GENERATED_JS_REG_EXP_STRING_ITERATOR_FLAGS()
+  static const int kDoneBit = 0;
+  static const int kGlobalBit = 1;
+  static const int kUnicodeBit = 2;

   TQ_OBJECT_CONSTRUCTORS(JSRegExpStringIterator)
 };
......
@@ -2,17 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-bitfield struct JSRegExpStringIteratorFlags extends uint31 {
-  done: bool: 1 bit;
-  global: bool: 1 bit;
-  unicode: bool: 1 bit;
-}
-
 @generateCppClass
 extern class JSRegExpStringIterator extends JSObject {
   // The [[IteratingRegExp]] internal property.
   iterating_reg_exp: JSReceiver;
   // The [[IteratedString]] internal property.
   iterated_string: String;
-  flags: SmiTagged<JSRegExpStringIteratorFlags>;
+  flags: Smi;
 }
@@ -53,7 +53,7 @@ class ValueTypeFieldIterator {
     if (const auto type_wrapped_in_smi =
             Type::MatchUnaryGeneric(type_, TypeOracle::GetSmiTaggedGeneric())) {
       type = *type_wrapped_in_smi;
-      bitfield_start_offset = TargetArchitecture::SmiTagAndShiftSize();
+      bitfield_start_offset = kSmiTagSize + kSmiShiftSize;
     }
     if (const BitFieldStructType* bit_field_struct_type =
             BitFieldStructType::DynamicCast(type)) {
......
@@ -5,7 +5,6 @@
 #include "src/torque/csa-generator.h"

 #include "src/common/globals.h"
-#include "src/torque/global-context.h"
 #include "src/torque/type-oracle.h"
 #include "src/torque/types.h"
 #include "src/torque/utils.h"
@@ -859,20 +858,13 @@ void CSAGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
 }

 namespace {
-std::string GetBitFieldSpecialization(const Type* container,
+std::string GetBitFieldSpecialization(const BitFieldStructType* container,
                                       const BitField& field) {
-  auto smi_tagged_type =
-      Type::MatchUnaryGeneric(container, TypeOracle::GetSmiTaggedGeneric());
-  std::string container_type = smi_tagged_type
-                                   ? "uintptr_t"
-                                   : container->GetConstexprGeneratedTypeName();
-  int offset = smi_tagged_type
-                   ? field.offset + TargetArchitecture::SmiTagAndShiftSize()
-                   : field.offset;
   std::stringstream stream;
   stream << "base::BitField<"
          << field.name_and_type.type->GetConstexprGeneratedTypeName() << ", "
-         << offset << ", " << field.num_bits << ", " << container_type << ">";
+         << field.offset << ", " << field.num_bits << ", "
+         << container->GetConstexprGeneratedTypeName() << ">";
   return stream.str();
 }
 }  // namespace
@@ -885,36 +877,23 @@ void CSAGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
   std::string bit_field_struct = stack->Pop();
   stack->Push(result_name);

-  const Type* struct_type = instruction.bit_field_struct_type;
-  const Type* field_type = instruction.bit_field.name_and_type.type;
-  auto smi_tagged_type =
-      Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric());
-  bool struct_is_pointer_size =
-      IsPointerSizeIntegralType(struct_type) || smi_tagged_type;
-  DCHECK_IMPLIES(!struct_is_pointer_size, Is32BitIntegralType(struct_type));
-  bool field_is_pointer_size = IsPointerSizeIntegralType(field_type);
-  DCHECK_IMPLIES(!field_is_pointer_size, Is32BitIntegralType(field_type));
-  std::string struct_word_type = struct_is_pointer_size ? "WordT" : "Word32T";
+  const BitFieldStructType* source_type = instruction.bit_field_struct_type;
+  const Type* result_type = instruction.bit_field.name_and_type.type;
+  bool source_uintptr = source_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
+  bool result_uintptr = result_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
+  std::string source_word_type = source_uintptr ? "WordT" : "Word32T";
   std::string decoder =
-      struct_is_pointer_size
-          ? (field_is_pointer_size ? "DecodeWord" : "DecodeWord32FromWord")
-          : (field_is_pointer_size ? "DecodeWordFromWord32" : "DecodeWord32");
-  decls() << " " << field_type->GetGeneratedTypeName() << " " << result_name
+      source_uintptr
+          ? (result_uintptr ? "DecodeWord" : "DecodeWord32FromWord")
+          : (result_uintptr ? "DecodeWordFromWord32" : "DecodeWord32");
+  decls() << " " << result_type->GetGeneratedTypeName() << " " << result_name
           << ";\n";
-  if (smi_tagged_type) {
-    // If the container is a SMI, then UncheckedCast is insufficient and we must
-    // use a bit cast.
-    bit_field_struct =
-        "ca_.BitcastTaggedToWordForTagAndSmiBits(" + bit_field_struct + ")";
-  }
   out() << " " << result_name << " = ca_.UncheckedCast<"
-        << field_type->GetGeneratedTNodeTypeName()
+        << result_type->GetGeneratedTNodeTypeName()
         << ">(CodeStubAssembler(state_)." << decoder << "<"
-        << GetBitFieldSpecialization(struct_type, instruction.bit_field)
-        << ">(ca_.UncheckedCast<" << struct_word_type << ">("
+        << GetBitFieldSpecialization(source_type, instruction.bit_field)
+        << ">(ca_.UncheckedCast<" << source_word_type << ">("
         << bit_field_struct << ")));\n";
 }
@@ -927,46 +906,25 @@ void CSAGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
   std::string bit_field_struct = stack->Pop();
   stack->Push(result_name);

-  const Type* struct_type = instruction.bit_field_struct_type;
+  const BitFieldStructType* struct_type = instruction.bit_field_struct_type;
   const Type* field_type = instruction.bit_field.name_and_type.type;
-  auto smi_tagged_type =
-      Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric());
-  bool struct_is_pointer_size =
-      IsPointerSizeIntegralType(struct_type) || smi_tagged_type;
-  DCHECK_IMPLIES(!struct_is_pointer_size, Is32BitIntegralType(struct_type));
-  bool field_is_pointer_size = IsPointerSizeIntegralType(field_type);
-  DCHECK_IMPLIES(!field_is_pointer_size, Is32BitIntegralType(field_type));
-  std::string struct_word_type = struct_is_pointer_size ? "WordT" : "Word32T";
-  std::string field_word_type = field_is_pointer_size ? "UintPtrT" : "Uint32T";
+  bool struct_uintptr = struct_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
+  bool field_uintptr = field_type->IsSubtypeOf(TypeOracle::GetUIntPtrType());
+  std::string struct_word_type = struct_uintptr ? "WordT" : "Word32T";
+  std::string field_word_type = field_uintptr ? "UintPtrT" : "Uint32T";
   std::string encoder =
-      struct_is_pointer_size
-          ? (field_is_pointer_size ? "UpdateWord" : "UpdateWord32InWord")
-          : (field_is_pointer_size ? "UpdateWordInWord32" : "UpdateWord32");
+      struct_uintptr ? (field_uintptr ? "UpdateWord" : "UpdateWord32InWord")
+                     : (field_uintptr ? "UpdateWordInWord32" : "UpdateWord32");
   decls() << " " << struct_type->GetGeneratedTypeName() << " " << result_name
           << ";\n";
-  if (smi_tagged_type) {
-    // If the container is a SMI, then UncheckedCast is insufficient and we must
-    // use a bit cast.
-    bit_field_struct =
-        "ca_.BitcastTaggedToWordForTagAndSmiBits(" + bit_field_struct + ")";
-  }
-  std::string result_expression =
-      "CodeStubAssembler(state_)." + encoder + "<" +
-      GetBitFieldSpecialization(struct_type, instruction.bit_field) +
-      ">(ca_.UncheckedCast<" + struct_word_type + ">(" + bit_field_struct +
-      "), ca_.UncheckedCast<" + field_word_type + ">(" + value + "))";
-  if (smi_tagged_type) {
-    result_expression =
-        "ca_.BitcastWordToTaggedSigned(" + result_expression + ")";
-  }
   out() << " " << result_name << " = ca_.UncheckedCast<"
-        << struct_type->GetGeneratedTNodeTypeName() << ">(" << result_expression
-        << ");\n";
+        << struct_type->GetGeneratedTNodeTypeName()
+        << ">(CodeStubAssembler(state_)." << encoder << "<"
+        << GetBitFieldSpecialization(struct_type, instruction.bit_field)
+        << ">(ca_.UncheckedCast<" << struct_word_type << ">("
+        << bit_field_struct << "), ca_.UncheckedCast<" << field_word_type
+        << ">(" << value << ")));\n";
 }

 // static
......
@@ -24,10 +24,7 @@ GlobalContext::GlobalContext(Ast ast)
 TargetArchitecture::TargetArchitecture(bool force_32bit)
     : tagged_size_(force_32bit ? sizeof(int32_t) : kTaggedSize),
-      raw_ptr_size_(force_32bit ? sizeof(int32_t) : kSystemPointerSize),
-      smi_tag_and_shift_size_(
-          kSmiTagSize + (force_32bit ? SmiTagging<kApiInt32Size>::kSmiShiftSize
-                                     : kSmiShiftSize)) {}
+      raw_ptr_size_(force_32bit ? sizeof(int32_t) : kSystemPointerSize) {}

 }  // namespace torque
 }  // namespace internal
......
@@ -93,12 +93,10 @@ class TargetArchitecture : public ContextualClass<TargetArchitecture> {
   static size_t RawPtrSize() { return Get().raw_ptr_size_; }
   static size_t MaxHeapAlignment() { return TaggedSize(); }
   static bool ArePointersCompressed() { return TaggedSize() < RawPtrSize(); }
-  static int SmiTagAndShiftSize() { return Get().smi_tag_and_shift_size_; }

  private:
   const size_t tagged_size_;
   const size_t raw_ptr_size_;
-  const int smi_tag_and_shift_size_;
 };

 }  // namespace torque
......
@@ -2002,20 +2002,6 @@ LocationReference ImplementationVisitor::GenerateFieldAccess(
     const BitField& field = bitfield_struct->LookupField(fieldname);
     return LocationReference::BitFieldAccess(reference, field);
   }
-  if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
-          reference.ReferencedType(), TypeOracle::GetSmiTaggedGeneric())) {
-    const BitFieldStructType* bitfield_struct =
-        BitFieldStructType::DynamicCast(*type_wrapped_in_smi);
-    if (bitfield_struct == nullptr) {
-      ReportError(
-          "When a value of type SmiTagged<T> is used in a field access "
-          "expression, T is expected to be a bitfield struct type. Instead, T "
-          "is ",
-          **type_wrapped_in_smi);
-    }
-    const BitField& field = bitfield_struct->LookupField(fieldname);
-    return LocationReference::BitFieldAccess(reference, field);
-  }
   if (reference.IsHeapReference()) {
     VisitResult ref = reference.heap_reference();
     bool is_const;
@@ -2204,8 +2190,9 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
     // First fetch the bitfield struct, then get the bits out of it.
     VisitResult bit_field_struct =
         GenerateFetchFromLocation(reference.bit_field_struct_location());
-    assembler().Emit(LoadBitFieldInstruction{bit_field_struct.type(),
-                                             reference.bit_field()});
+    assembler().Emit(LoadBitFieldInstruction{
+        BitFieldStructType::cast(bit_field_struct.type()),
+        reference.bit_field()});
     return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
   } else {
     if (reference.IsHeapSlice()) {
@@ -2284,8 +2271,9 @@ void ImplementationVisitor::GenerateAssignToLocation(
         GenerateImplicitConvert(reference.ReferencedType(), assignment_value);
     GenerateCopy(bit_field_struct);
     GenerateCopy(converted_value);
-    assembler().Emit(StoreBitFieldInstruction{bit_field_struct.type(),
-                                              reference.bit_field()});
+    assembler().Emit(StoreBitFieldInstruction{
+        BitFieldStructType::cast(bit_field_struct.type()),
+        reference.bit_field()});
     GenerateAssignToLocation(
         reference.bit_field_struct_location(),
         VisitResult(bit_field_struct.type(), assembler().TopRange(1)));
......
@@ -349,13 +349,14 @@ struct StoreReferenceInstruction : InstructionBase {
 // Pops a bitfield struct; pushes a bitfield value extracted from it.
 struct LoadBitFieldInstruction : InstructionBase {
   TORQUE_INSTRUCTION_BOILERPLATE()
-  LoadBitFieldInstruction(const Type* bit_field_struct_type, BitField bit_field)
+  LoadBitFieldInstruction(const BitFieldStructType* bit_field_struct_type,
+                          BitField bit_field)
       : bit_field_struct_type(bit_field_struct_type),
         bit_field(std::move(bit_field)) {}

   DefinitionLocation GetValueDefinition() const;

-  const Type* bit_field_struct_type;
+  const BitFieldStructType* bit_field_struct_type;
   BitField bit_field;
 };
@@ -363,14 +364,14 @@ struct LoadBitFieldInstruction : InstructionBase {
 // containing the updated value.
 struct StoreBitFieldInstruction : InstructionBase {
   TORQUE_INSTRUCTION_BOILERPLATE()
-  StoreBitFieldInstruction(const Type* bit_field_struct_type,
+  StoreBitFieldInstruction(const BitFieldStructType* bit_field_struct_type,
                            BitField bit_field)
       : bit_field_struct_type(bit_field_struct_type),
         bit_field(std::move(bit_field)) {}

   DefinitionLocation GetValueDefinition() const;

-  const Type* bit_field_struct_type;
+  const BitFieldStructType* bit_field_struct_type;
   BitField bit_field;
 };
......
@@ -928,17 +928,10 @@ bool IsAllowedAsBitField(const Type* type) {
   // Any integer-ish type, including bools and enums which inherit from integer
   // types, are allowed. Note, however, that we always zero-extend during
   // decoding regardless of signedness.
-  return IsPointerSizeIntegralType(type) || Is32BitIntegralType(type);
-}
-
-bool IsPointerSizeIntegralType(const Type* type) {
-  return type->IsSubtypeOf(TypeOracle::GetUIntPtrType()) ||
-         type->IsSubtypeOf(TypeOracle::GetIntPtrType());
-}
-
-bool Is32BitIntegralType(const Type* type) {
-  return type->IsSubtypeOf(TypeOracle::GetUint32Type()) ||
-         type->IsSubtypeOf(TypeOracle::GetInt32Type()) ||
-         type->IsSubtypeOf(TypeOracle::GetBoolType());
+  return type->IsSubtypeOf(TypeOracle::GetUint32Type()) ||
+         type->IsSubtypeOf(TypeOracle::GetUIntPtrType()) ||
+         type->IsSubtypeOf(TypeOracle::GetInt32Type()) ||
+         type->IsSubtypeOf(TypeOracle::GetIntPtrType()) ||
+         type->IsSubtypeOf(TypeOracle::GetBoolType());
 }
......
@@ -816,8 +816,6 @@ TypeVector LowerParameterTypes(const ParameterTypes& parameter_types,
 base::Optional<std::tuple<size_t, std::string>> SizeOf(const Type* type);
 bool IsAnyUnsignedInteger(const Type* type);
 bool IsAllowedAsBitField(const Type* type);
-bool IsPointerSizeIntegralType(const Type* type);
-bool Is32BitIntegralType(const Type* type);
 base::Optional<NameAndType> ExtractSimpleFieldArraySize(
     const ClassType& class_type, Expression* array_size);
......
@@ -36,7 +36,7 @@ TestNoMatch('a', 'b');
 function TestGlobalRegex(regex_or_string) {
-  const iter = 'ab'.matchAll(regex_or_string);
+  const iter = 'ab'.matchAll(/./g);
   let next_result = iter.next();
   assertEquals(['a'], next_result.value);
   assertFalse(next_result.done);
......
@@ -763,44 +763,6 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithComparisonAndConstantOne) {
   }
 }

-// -----------------------------------------------------------------------------
-// Word32Or
-
-TEST_F(MachineOperatorReducerTest, Word32OrWithWord32And) {
-  Node* const p0 = Parameter(0);
-  TRACED_FOREACH(int32_t, m, kUint32Values) {
-    TRACED_FOREACH(int32_t, rhs, kUint32Values) {
-      // To get better coverage of interesting cases, run this test twice:
-      // once with the mask from kUint32Values, and once with its inverse.
-      for (int32_t mask : {m, ~m}) {
-        Reduction const r = Reduce(graph()->NewNode(
-            machine()->Word32Or(),
-            graph()->NewNode(machine()->Word32And(), p0, Int32Constant(mask)),
-            Int32Constant(rhs)));
-        switch (rhs) {
-          case 0:  // x | 0 => x
-            ASSERT_TRUE(r.Changed());
-            EXPECT_THAT(r.replacement(),
-                        IsWord32And(p0, IsInt32Constant(mask)));
-            break;
-          case -1:  // x | -1 => -1
-            ASSERT_TRUE(r.Changed());
-            EXPECT_THAT(r.replacement(), IsInt32Constant(-1));
-            break;
-          default:  // (x & K1) | K2 => x | K2, if K1 | K2 == -1
-            if ((mask | rhs) == -1) {
-              ASSERT_TRUE(r.Changed());
-              EXPECT_THAT(r.replacement(),
-                          IsWord32Or(p0, IsInt32Constant(rhs)));
-            } else {
-              ASSERT_TRUE(!r.Changed());
-            }
-            break;
-        }
-      }
-    }
-  }
-}
-
 // -----------------------------------------------------------------------------
 // Word32Xor
......