Commit 9ed4b965 authored by Jakob Kummerow, committed by Commit Bot

Fix building with GCC 7.x and 8.x

GCC 7.x doesn't like it (-Werror=subobject-linkage) when a class
either derives from, or has a member field of, a type that was
declared in an anonymous namespace.
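
For illustration, a minimal sketch of that pattern and of the shape of
the fix; the names here are invented, not taken from the V8 tree:

  namespace {
  class Helper {};  // internal linkage: lives in an anonymous namespace
  }  // namespace

  // GCC 7.x warns here that 'Exposed' has a base whose type uses the
  // anonymous namespace (-Wsubobject-linkage); a member field of type
  // Helper triggers the same complaint.
  class Exposed : public Helper {};

  // Shape of the fix in this CL: move the type out of the anonymous
  // namespace (or shrink the namespace so it no longer encloses it),
  // giving base and derived class the same linkage.
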
It is also opposed (-Werror=attributes) to visibility attributes
being specified on explicit template instantiations.
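
Roughly the pattern behind that complaint, again with hypothetical
names (Cache stands in for NodeCache, EXPORT for V8_EXPORT_PRIVATE):

  #define EXPORT __attribute__((visibility("default")))

  template <typename T>
  class Cache {
   public:
    T* Find(int key);
  };

  // GCC objects (-Wattributes) when the visibility attribute shows up
  // on the explicit instantiation rather than on the template itself:
  template class EXPORT Cache<int>;

  // Fix direction taken in this CL: annotate the primary template
  // instead (NodeCache became "class V8_EXPORT_PRIVATE NodeCache") and
  // keep the explicit instantiations unadorned:
  //   template class Cache<int>;
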
GCC 8.x further has reservations (-Werror=class-memaccess) about
letting memset/memcpy modify areas within non-POD objects.
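
A sketch of that case together with the static_cast<void*> workaround
this CL applies at the memset call sites below; the Event struct is
illustrative:

  #include <cstring>

  struct Event {
    Event() : type(0), user_data(nullptr) {}  // user-provided ctor: non-trivial
    int type;
    void* user_data;
  };

  void Clear(Event* event) {
    // GCC 8.x warns (-Wclass-memaccess) that memset is clearing an
    // object of non-trivial type and suggests assignment or
    // value-initialization instead:
    //   memset(event, 0, sizeof(*event));

    // The workaround used throughout this CL: cast to void* first,
    // which makes the raw-memory intent explicit and silences the
    // warning.
    memset(static_cast<void*>(event), 0, sizeof(*event));
  }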

Change-Id: Ic5107bb5ee3af6233e3741e3ef78d03a0a84005a
Reviewed-on: https://chromium-review.googlesource.com/1208306
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56106}
parent 81c9e393
@@ -3707,8 +3707,6 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
}
}
-namespace {
class ArrayFlattenAssembler : public CodeStubAssembler {
public:
explicit ArrayFlattenAssembler(compiler::CodeAssemblerState* state)
@@ -3849,8 +3847,6 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
}
};
-} // namespace
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
Node* const context = Parameter(Descriptor::kContext);
......
@@ -9,6 +9,8 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/handles-inl.h"
#include "src/objects/map-inl.h"
#ifdef DEBUG
#define TRACE(...) \
@@ -669,9 +671,10 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
current->Get(map_field).To(&map)) {
if (map) {
Type const map_type = NodeProperties::GetType(map);
+AllowHandleDereference handle_dereference;
if (map_type.IsHeapConstant() &&
params.maps().contains(
-bit_cast<Handle<Map>>(map_type.AsHeapConstant()->Value()))) {
+Handle<Map>::cast(map_type.AsHeapConstant()->Value()))) {
current->MarkForDeletion();
break;
}
......
@@ -1048,7 +1048,7 @@ class V8_EXPORT_PRIVATE Constant final {
explicit Constant(float v) : type_(kFloat32), value_(bit_cast<int32_t>(v)) {}
explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
explicit Constant(ExternalReference ref)
-: type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
+: type_(kExternalReference), value_(bit_cast<intptr_t>(ref.address())) {}
explicit Constant(Handle<HeapObject> obj)
: type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
@@ -1093,7 +1093,7 @@ class V8_EXPORT_PRIVATE Constant final {
ExternalReference ToExternalReference() const {
DCHECK_EQ(kExternalReference, type());
-return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
+return ExternalReference::FromRawAddress(static_cast<Address>(value_));
}
RpoNumber ToRpoNumber() const {
......
@@ -848,8 +848,9 @@ Reduction LoadElimination::ReduceStoreField(Node* node) {
Type const new_value_type = NodeProperties::GetType(new_value);
if (new_value_type.IsHeapConstant()) {
// Record the new {object} map information.
+AllowHandleDereference handle_dereference;
ZoneHandleSet<Map> object_maps(
-bit_cast<Handle<Map>>(new_value_type.AsHeapConstant()->Value()));
+Handle<Map>::cast(new_value_type.AsHeapConstant()->Value()));
state = state->SetMaps(object, object_maps, zone());
}
} else {
......
@@ -38,7 +38,7 @@ bool NodeCache<Key, Hash, Pred>::Resize(Zone* zone) {
size_ *= 4;
size_t num_entries = size_ + kLinearProbe;
entries_ = zone->NewArray<Entry>(num_entries);
-memset(entries_, 0, sizeof(Entry) * num_entries);
+memset(static_cast<void*>(entries_), 0, sizeof(Entry) * num_entries);
// Insert the old entries into the new block.
for (size_t i = 0; i < old_size; ++i) {
@@ -69,7 +69,7 @@ Node** NodeCache<Key, Hash, Pred>::Find(Zone* zone, Key key) {
size_t num_entries = kInitialSize + kLinearProbe;
entries_ = zone->NewArray<Entry>(num_entries);
size_ = kInitialSize;
-memset(entries_, 0, sizeof(Entry) * num_entries);
+memset(static_cast<void*>(entries_), 0, sizeof(Entry) * num_entries);
Entry* entry = &entries_[hash & (kInitialSize - 1)];
entry->key_ = key;
return &entry->value_;
......
@@ -27,7 +27,7 @@ class Node;
// nodes such as constants, parameters, etc.
template <typename Key, typename Hash = base::hash<Key>,
typename Pred = std::equal_to<Key> >
-class NodeCache final {
+class V8_EXPORT_PRIVATE NodeCache final {
public:
explicit NodeCache(unsigned max = 256)
: entries_(nullptr), size_(0), max_(max) {}
......
@@ -38,7 +38,8 @@ SimdScalarLowering::SimdScalarLowering(
DCHECK_NOT_NULL(graph());
DCHECK_NOT_NULL(graph()->end());
replacements_ = zone()->NewArray<Replacement>(graph()->NodeCount());
-memset(replacements_, 0, sizeof(Replacement) * graph()->NodeCount());
+memset(static_cast<void*>(replacements_), 0,
+sizeof(Replacement) * graph()->NodeCount());
}
void SimdScalarLowering::LowerGraph() {
......
@@ -186,32 +186,6 @@ void ChangeToPureOp(Node* node, const Operator* new_op) {
NodeProperties::ChangeOp(node, new_op);
}
-#ifdef DEBUG
-// Helpers for monotonicity checking.
-class InputUseInfos {
- public:
-explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}
-void SetAndCheckInput(Node* node, int index, UseInfo use_info) {
-if (input_use_infos_.empty()) {
-input_use_infos_.resize(node->InputCount(), UseInfo::None());
-}
-// Check that the new use information is a super-type of the old
-// one.
-DCHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
-input_use_infos_[index] = use_info;
-}
- private:
-ZoneVector<UseInfo> input_use_infos_;
-static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
-return use1.truncation().IsLessGeneralThan(use2.truncation());
-}
-};
-#endif // DEBUG
bool CanOverflowSigned32(const Operator* op, Type left, Type right,
Zone* type_zone) {
// We assume the inputs are checked Signed32 (or known statically
@@ -241,6 +215,32 @@ bool IsSomePositiveOrderedNumber(Type type) {
} // namespace
+#ifdef DEBUG
+// Helpers for monotonicity checking.
+class InputUseInfos {
+ public:
+explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}
+void SetAndCheckInput(Node* node, int index, UseInfo use_info) {
+if (input_use_infos_.empty()) {
+input_use_infos_.resize(node->InputCount(), UseInfo::None());
+}
+// Check that the new use information is a super-type of the old
+// one.
+DCHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
+input_use_infos_[index] = use_info;
+}
+ private:
+ZoneVector<UseInfo> input_use_infos_;
+static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
+return use1.truncation().IsLessGeneralThan(use2.truncation());
+}
+};
+#endif // DEBUG
class RepresentationSelector {
public:
// Information for each node tracked during the fixpoint.
......
@@ -30,8 +30,6 @@
namespace v8 {
namespace internal {
-namespace {
inline double JunkStringValue() {
return bit_cast<double, uint64_t>(kQuietNaNMask);
}
@@ -812,8 +810,6 @@ parsing_done:
return (sign == NEGATIVE) ? -converted : converted;
}
-} // namespace
double StringToDouble(UnicodeCache* unicode_cache,
const char* str, int flags, double empty_string_val) {
// We cast to const uint8_t* here to avoid instantiating the
......
@@ -824,8 +824,8 @@ ExternalReference ExternalReference::page_flags(Page* page) {
MemoryChunk::kFlagsOffset);
}
-ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
-return ExternalReference(entry);
+ExternalReference ExternalReference::FromRawAddress(Address address) {
+return ExternalReference(address);
}
ExternalReference ExternalReference::cpu_features() {
......
@@ -268,7 +268,7 @@ class ExternalReference {
static ExternalReference page_flags(Page* page);
-static ExternalReference ForDeoptEntry(Address entry);
+static ExternalReference FromRawAddress(Address address);
#define DECL_EXTERNAL_REFERENCE(name, desc) static ExternalReference name();
EXTERNAL_REFERENCE_LIST(DECL_EXTERNAL_REFERENCE)
......
@@ -42,9 +42,7 @@ class InnerPointerToCodeCache {
Flush();
}
-void Flush() {
-memset(&cache_[0], 0, sizeof(cache_));
-}
+void Flush() { memset(static_cast<void*>(&cache_[0]), 0, sizeof(cache_)); }
InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
......
@@ -654,7 +654,7 @@ void JitLogger::LogRecordedBuffer(AbstractCode* code,
SharedFunctionInfo* shared, const char* name,
int length) {
JitCodeEvent event;
-memset(&event, 0, sizeof(event));
+memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADDED;
event.code_start = reinterpret_cast<void*>(code->InstructionStart());
event.code_type =
@@ -675,7 +675,7 @@ void JitLogger::LogRecordedBuffer(AbstractCode* code,
void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) {
JitCodeEvent event;
-memset(&event, 0, sizeof(event));
+memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADDED;
event.code_type = JitCodeEvent::JIT_CODE;
event.code_start = code->instructions().start();
@@ -707,7 +707,7 @@ void JitLogger::AddCodeLinePosInfoEvent(
int position,
JitCodeEvent::PositionType position_type) {
JitCodeEvent event;
-memset(&event, 0, sizeof(event));
+memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADD_LINE_POS_INFO;
event.user_data = jit_handler_data;
event.line_info.offset = pc_offset;
@@ -721,7 +721,7 @@ void JitLogger::AddCodeLinePosInfoEvent(
void* JitLogger::StartCodePosInfoEvent() {
JitCodeEvent event;
-memset(&event, 0, sizeof(event));
+memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_START_LINE_INFO_RECORDING;
event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
@@ -732,7 +732,7 @@ void* JitLogger::StartCodePosInfoEvent() {
void JitLogger::EndCodePosInfoEvent(Address start_address,
void* jit_handler_data) {
JitCodeEvent event;
-memset(&event, 0, sizeof(event));
+memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
event.code_start = reinterpret_cast<void*>(start_address);
event.user_data = jit_handler_data;
......
@@ -18,8 +18,6 @@
namespace v8 {
namespace internal {
-namespace {
struct ModuleHandleHash {
V8_INLINE size_t operator()(Handle<Module> module) const {
return module->hash();
@@ -82,8 +80,6 @@ class UnorderedStringMap
zone)) {}
};
-} // anonymous namespace
class Module::ResolveSet
: public std::unordered_map<
Handle<Module>, UnorderedStringSet*, ModuleHandleHash,
......
@@ -409,42 +409,6 @@ Address CompileLazy(Isolate* isolate, NativeModule* native_module,
return result->instruction_start();
}
-namespace {
-bool compile_lazy(const WasmModule* module) {
-return FLAG_wasm_lazy_compilation ||
-(FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
-}
-byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
-return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
-}
-void RecordStats(const Code* code, Counters* counters) {
-counters->wasm_generated_code_size()->Increment(code->body_size());
-counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
-}
-bool in_bounds(uint32_t offset, size_t size, size_t upper) {
-return offset + size <= upper && offset + size >= offset;
-}
-using WasmInstanceMap =
-IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
-double MonotonicallyIncreasingTimeInMs() {
-return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
-base::Time::kMillisecondsPerSecond;
-}
-ModuleEnv CreateDefaultModuleEnv(const WasmModule* module,
-bool allow_trap_handler = true) {
-UseTrapHandler use_trap_handler =
-trap_handler::IsTrapHandlerEnabled() && allow_trap_handler
-? kUseTrapHandler
-: kNoTrapHandler;
-return ModuleEnv(module, use_trap_handler, kRuntimeExceptionSupport);
-}
// The CompilationUnitBuilder builds compilation units and stores them in an
// internal buffer. The buffer is moved into the working queue of the
// CompilationState when {Commit} is called.
@@ -503,6 +467,42 @@ class CompilationUnitBuilder {
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_;
};
+namespace {
+bool compile_lazy(const WasmModule* module) {
+return FLAG_wasm_lazy_compilation ||
+(FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
+}
+byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
+return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
+}
+void RecordStats(const Code* code, Counters* counters) {
+counters->wasm_generated_code_size()->Increment(code->body_size());
+counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
+}
+bool in_bounds(uint32_t offset, size_t size, size_t upper) {
+return offset + size <= upper && offset + size >= offset;
+}
+using WasmInstanceMap =
+IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
+double MonotonicallyIncreasingTimeInMs() {
+return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+base::Time::kMillisecondsPerSecond;
+}
+ModuleEnv CreateDefaultModuleEnv(const WasmModule* module,
+bool allow_trap_handler = true) {
+UseTrapHandler use_trap_handler =
+trap_handler::IsTrapHandlerEnabled() && allow_trap_handler
+? kUseTrapHandler
+: kNoTrapHandler;
+return ModuleEnv(module, use_trap_handler, kRuntimeExceptionSupport);
+}
// Run by each compilation task and by the main thread (i.e. in both
// foreground and background threads). The no_finisher_callback is called
// within the result_mutex_ lock when no finishing task is running, i.e. when
......
@@ -641,6 +641,8 @@ const char* OpcodeName(uint32_t val) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}
+} // namespace
class SideTable;
// Code and metadata needed to execute a function.
@@ -902,32 +904,6 @@ class SideTable : public ZoneObject {
}
};
-struct ExternalCallResult {
-enum Type {
-// The function should be executed inside this interpreter.
-INTERNAL,
-// For indirect calls: Table or function does not exist.
-INVALID_FUNC,
-// For indirect calls: Signature does not match expected signature.
-SIGNATURE_MISMATCH,
-// The function was executed and returned normally.
-EXTERNAL_RETURNED,
-// The function was executed, threw an exception, and the stack was unwound.
-EXTERNAL_UNWOUND
-};
-Type type;
-// If type is INTERNAL, this field holds the function to call internally.
-InterpreterCode* interpreter_code;
-ExternalCallResult(Type type) : type(type) { // NOLINT
-DCHECK_NE(INTERNAL, type);
-}
-ExternalCallResult(Type type, InterpreterCode* code)
-: type(type), interpreter_code(code) {
-DCHECK_EQ(INTERNAL, type);
-}
-};
// The main storage for interpreter code. It maps {WasmFunction} to the
// metadata needed to execute each function.
class CodeMap {
@@ -1037,6 +1013,34 @@ class CodeMap {
}
};
+namespace {
+struct ExternalCallResult {
+enum Type {
+// The function should be executed inside this interpreter.
+INTERNAL,
+// For indirect calls: Table or function does not exist.
+INVALID_FUNC,
+// For indirect calls: Signature does not match expected signature.
+SIGNATURE_MISMATCH,
+// The function was executed and returned normally.
+EXTERNAL_RETURNED,
+// The function was executed, threw an exception, and the stack was unwound.
+EXTERNAL_UNWOUND
+};
+Type type;
+// If type is INTERNAL, this field holds the function to call internally.
+InterpreterCode* interpreter_code;
+ExternalCallResult(Type type) : type(type) { // NOLINT
+DCHECK_NE(INTERNAL, type);
+}
+ExternalCallResult(Type type, InterpreterCode* code)
+: type(type), interpreter_code(code) {
+DCHECK_EQ(INTERNAL, type);
+}
+};
// Like a static_cast from src to dst, but specialized for boxed floats.
template <typename dst, typename src>
struct converter {
@@ -1073,6 +1077,8 @@ V8_INLINE bool has_nondeterminism<double>(double val) {
return std::isnan(val);
}
+} // namespace
// Responsible for executing code directly.
class ThreadImpl {
struct Activation {
@@ -2985,6 +2991,8 @@ class InterpretedFrameImpl {
}
};
+namespace {
// Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
// Thread* is the public interface, without knowledge of the object layout.
// This cast is potentially risky, but as long as we always cast it back before
......
@@ -28,7 +28,7 @@ TEST_F(WasmModuleBuilderTest, Regression_647329) {
// Test crashed with asan.
ZoneBuffer buffer(zone());
const size_t kSize = ZoneBuffer::kInitialSize * 3 + 4096 + 100;
-byte data[kSize];
+byte data[kSize] = {0};
buffer.write(data, kSize);
}
......