Commit 5c152a0f authored by Samuel Groß, committed by V8 LUCI CQ

[sandbox] Remove a number of native allocations from WasmInstanceObject

Those are not safe in combination with the sandbox as they are stored as
raw pointers. Instead of turning them into ExternalPointers (which use
the ExternalPointerTable indirection), this CL simply turns them into
on-heap ByteArrays which is cheaper and should be unproblematic
security-wise as their contents can be corrupted without causing memory
corruption outside the sandbox address space (just incorrect behaviour
and/or further memory corruption *inside* the sandbox, which is fine).

Bug: chromium:1335046
Change-Id: Id2b901a58b7d6c91dd7596fca553d7c76cbc61ec
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3845636
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82765}
parent c37badf3
...@@ -308,6 +308,7 @@ path. Add it with -I<path> to the command line ...@@ -308,6 +308,7 @@ path. Add it with -I<path> to the command line
// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported // V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported // V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
// V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported // V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported
// V8_HAS_BUILTIN_SMUL_OVERFLOW - __builtin_smul_overflow() supported
// V8_HAS_COMPUTED_GOTO - computed goto/labels as values // V8_HAS_COMPUTED_GOTO - computed goto/labels as values
// supported // supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported // V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
...@@ -357,6 +358,7 @@ path. Add it with -I<path> to the command line ...@@ -357,6 +358,7 @@ path. Add it with -I<path> to the command line
# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow)) # define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow)) # define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
# define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow)) # define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
# define V8_HAS_BUILTIN_SMUL_OVERFLOW (__has_builtin(__builtin_smul_overflow))
# define V8_HAS_BUILTIN_UNREACHABLE (__has_builtin(__builtin_unreachable)) # define V8_HAS_BUILTIN_UNREACHABLE (__has_builtin(__builtin_unreachable))
// Clang has no __has_feature for computed gotos. // Clang has no __has_feature for computed gotos.
......
...@@ -90,14 +90,6 @@ int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs) { ...@@ -90,14 +90,6 @@ int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs) {
return lhs - rhs; return lhs - rhs;
} }
// Multiplies |lhs| and |rhs|, stores the (possibly truncated) 32-bit product
// in |*val|, and returns true iff the mathematically exact product does not
// fit in an int32_t (i.e. the signed multiplication overflowed).
bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
  // Widen both operands to 64 bits so the product is always exact.
  const int64_t product = static_cast<int64_t>(lhs) * static_cast<int64_t>(rhs);
  *val = static_cast<int32_t>(product);
  // Overflow occurred exactly when the truncation to 32 bits lost information.
  return product != static_cast<int64_t>(*val);
}
} // namespace bits } // namespace bits
} // namespace base } // namespace base
} // namespace v8 } // namespace v8
...@@ -261,7 +261,17 @@ inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) { ...@@ -261,7 +261,17 @@ inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
// SignedMulOverflow32(lhs,rhs,val) performs a signed multiplication of |lhs| // SignedMulOverflow32(lhs,rhs,val) performs a signed multiplication of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and // and |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed multiplication resulted in an overflow. // returns true if the signed multiplication resulted in an overflow.
V8_BASE_EXPORT bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val); inline bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
#if V8_HAS_BUILTIN_SMUL_OVERFLOW
return __builtin_smul_overflow(lhs, rhs, val);
#else
// Compute the result as {int64_t}, then check for overflow.
int64_t result = int64_t{lhs} * int64_t{rhs};
*val = static_cast<int32_t>(result);
using limits = std::numeric_limits<int32_t>;
return result < limits::min() || result > limits::max();
#endif
}
// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and // SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and // |rhs| and stores the result into the variable pointed to by |val| and
......
...@@ -2715,12 +2715,15 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig, ...@@ -2715,12 +2715,15 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
// Load the target from the imported_targets array at the offset of // Load the target from the imported_targets array at the offset of
// {func_index}. // {func_index}.
Node* func_index_times_pointersize = gasm_->IntMul( Node* offset = gasm_->IntAdd(
func_index_intptr, gasm_->IntPtrConstant(kSystemPointerSize)); gasm_->IntMul(func_index_intptr,
Node* imported_targets = gasm_->IntPtrConstant(kSystemPointerSize)),
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer()); gasm_->IntPtrConstant(
Node* target_node = gasm_->LoadImmutableFromObject( wasm::ObjectAccess::ToTagged(FixedArray::kObjectsOffset)));
MachineType::Pointer(), imported_targets, func_index_times_pointersize); Node* imported_targets = LOAD_INSTANCE_FIELD(ImportedFunctionTargets,
MachineType::TaggedPointer());
Node* target_node = gasm_->LoadImmutableFromObject(MachineType::Pointer(),
imported_targets, offset);
args[0] = target_node; args[0] = target_node;
switch (continuation) { switch (continuation) {
...@@ -3254,25 +3257,32 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f, ...@@ -3254,25 +3257,32 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
void WasmGraphBuilder::GetGlobalBaseAndOffset(const wasm::WasmGlobal& global, void WasmGraphBuilder::GetGlobalBaseAndOffset(const wasm::WasmGlobal& global,
Node** base, Node** offset) { Node** base, Node** offset) {
if (global.mutability && global.imported) { if (global.mutability && global.imported) {
Node* base_or_index = gasm_->LoadFromObject( Node* imported_mutable_globals = LOAD_INSTANCE_FIELD(
MachineType::UintPtr(), ImportedMutableGlobals, MachineType::TaggedPointer());
LOAD_INSTANCE_FIELD(ImportedMutableGlobals, MachineType::UintPtr()), Node* field_offset = Int32Constant(
Int32Constant(global.index * kSystemPointerSize)); wasm::ObjectAccess::ElementOffsetInTaggedFixedAddressArray(
global.index));
if (global.type.is_reference()) { if (global.type.is_reference()) {
// Load the base from the ImportedMutableGlobalsBuffer of the instance. // Load the base from the ImportedMutableGlobalsBuffer of the instance.
Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers, Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers,
MachineType::TaggedPointer()); MachineType::TaggedPointer());
*base = gasm_->LoadFixedArrayElementAny(buffers, global.index); *base = gasm_->LoadFixedArrayElementAny(buffers, global.index);
// For this case, {base_or_index} gives the index of the global in the Node* index = gasm_->LoadFromObject(
// buffer. From the index, calculate the actual offset in the FixedArray. MachineType::Int32(), imported_mutable_globals, field_offset);
// This is kHeaderSize + (index * kTaggedSize). // For this case, {index} gives the index of the global in the buffer.
// From the index, calculate the actual offset in the FixedArray. This is
// kHeaderSize + (index * kTaggedSize).
*offset = gasm_->IntAdd( *offset = gasm_->IntAdd(
gasm_->IntMul(base_or_index, gasm_->IntPtrConstant(kTaggedSize)), gasm_->IntMul(index, gasm_->IntPtrConstant(kTaggedSize)),
gasm_->IntPtrConstant( gasm_->IntPtrConstant(
wasm::ObjectAccess::ToTagged(FixedArray::kObjectsOffset))); wasm::ObjectAccess::ToTagged(FixedArray::kObjectsOffset)));
} else { } else {
*base = base_or_index; MachineType machine_type = V8_ENABLE_SANDBOX_BOOL
? MachineType::SandboxedPointer()
: MachineType::UintPtr();
*base = gasm_->LoadFromObject(machine_type, imported_mutable_globals,
field_offset);
*offset = gasm_->IntPtrConstant(0); *offset = gasm_->IntPtrConstant(0);
} }
} else if (global.type.is_reference()) { } else if (global.type.is_reference()) {
...@@ -3281,11 +3291,10 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(const wasm::WasmGlobal& global, ...@@ -3281,11 +3291,10 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(const wasm::WasmGlobal& global,
*offset = gasm_->IntPtrConstant( *offset = gasm_->IntPtrConstant(
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(global.offset)); wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(global.offset));
} else { } else {
#ifdef V8_ENABLE_SANDBOX MachineType machine_type = V8_ENABLE_SANDBOX_BOOL
*base = LOAD_INSTANCE_FIELD(GlobalsStart, MachineType::SandboxedPointer()); ? MachineType::SandboxedPointer()
#else : MachineType::UintPtr();
*base = LOAD_INSTANCE_FIELD(GlobalsStart, MachineType::UintPtr()); *base = LOAD_INSTANCE_FIELD(GlobalsStart, machine_type);
#endif
*offset = gasm_->IntPtrConstant(global.offset); *offset = gasm_->IntPtrConstant(global.offset);
} }
} }
...@@ -5035,10 +5044,13 @@ void WasmGraphBuilder::DataDrop(uint32_t data_segment_index, ...@@ -5035,10 +5044,13 @@ void WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments); DCHECK_LT(data_segment_index, env_->module->num_declared_data_segments);
Node* seg_size_array = Node* seg_size_array =
LOAD_INSTANCE_FIELD(DataSegmentSizes, MachineType::Pointer()); LOAD_INSTANCE_FIELD(DataSegmentSizes, MachineType::TaggedPointer());
static_assert(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >> 2); static_assert(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >> 2);
auto access = ObjectAccess(MachineType::Int32(), kNoWriteBarrier); auto access = ObjectAccess(MachineType::Int32(), kNoWriteBarrier);
gasm_->StoreToObject(access, seg_size_array, data_segment_index << 2, gasm_->StoreToObject(
access, seg_size_array,
wasm::ObjectAccess::ElementOffsetInTaggedFixedUInt32Array(
data_segment_index),
Int32Constant(0)); Int32Constant(0));
} }
...@@ -5137,10 +5149,12 @@ void WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index, ...@@ -5137,10 +5149,12 @@ void WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
DCHECK_LT(elem_segment_index, env_->module->elem_segments.size()); DCHECK_LT(elem_segment_index, env_->module->elem_segments.size());
Node* dropped_elem_segments = Node* dropped_elem_segments =
LOAD_INSTANCE_FIELD(DroppedElemSegments, MachineType::Pointer()); LOAD_INSTANCE_FIELD(DroppedElemSegments, MachineType::TaggedPointer());
auto store_rep = auto store_rep =
StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier); StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier);
gasm_->Store(store_rep, dropped_elem_segments, elem_segment_index, gasm_->Store(store_rep, dropped_elem_segments,
wasm::ObjectAccess::ElementOffsetInTaggedFixedUInt8Array(
elem_segment_index),
Int32Constant(1)); Int32Constant(1));
} }
......
...@@ -2011,7 +2011,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { ...@@ -2011,7 +2011,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
PRINT_OPTIONAL_WASM_INSTANCE_FIELD(indirect_function_tables, Brief); PRINT_OPTIONAL_WASM_INSTANCE_FIELD(indirect_function_tables, Brief);
PRINT_WASM_INSTANCE_FIELD(imported_function_refs, Brief); PRINT_WASM_INSTANCE_FIELD(imported_function_refs, Brief);
PRINT_OPTIONAL_WASM_INSTANCE_FIELD(indirect_function_table_refs, Brief); PRINT_OPTIONAL_WASM_INSTANCE_FIELD(indirect_function_table_refs, Brief);
PRINT_OPTIONAL_WASM_INSTANCE_FIELD(managed_native_allocations, Brief);
PRINT_OPTIONAL_WASM_INSTANCE_FIELD(tags_table, Brief); PRINT_OPTIONAL_WASM_INSTANCE_FIELD(tags_table, Brief);
PRINT_OPTIONAL_WASM_INSTANCE_FIELD(wasm_internal_functions, Brief); PRINT_OPTIONAL_WASM_INSTANCE_FIELD(wasm_internal_functions, Brief);
PRINT_WASM_INSTANCE_FIELD(managed_object_maps, Brief); PRINT_WASM_INSTANCE_FIELD(managed_object_maps, Brief);
...@@ -2025,18 +2024,18 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { ...@@ -2025,18 +2024,18 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
PRINT_WASM_INSTANCE_FIELD(new_allocation_top_address, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(new_allocation_top_address, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(old_allocation_limit_address, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(old_allocation_limit_address, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(old_allocation_top_address, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(old_allocation_top_address, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(imported_function_targets, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(imported_function_targets, Brief);
PRINT_WASM_INSTANCE_FIELD(globals_start, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(globals_start, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(imported_mutable_globals, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(imported_mutable_globals, Brief);
PRINT_WASM_INSTANCE_FIELD(indirect_function_table_size, +); PRINT_WASM_INSTANCE_FIELD(indirect_function_table_size, +);
PRINT_WASM_INSTANCE_FIELD(indirect_function_table_sig_ids, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(indirect_function_table_sig_ids, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(indirect_function_table_targets, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(indirect_function_table_targets, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(isorecursive_canonical_types, PRINT_WASM_INSTANCE_FIELD(isorecursive_canonical_types,
reinterpret_cast<const uint32_t*>); reinterpret_cast<const uint32_t*>);
PRINT_WASM_INSTANCE_FIELD(jump_table_start, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(jump_table_start, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(data_segment_starts, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(data_segment_starts, Brief);
PRINT_WASM_INSTANCE_FIELD(data_segment_sizes, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(data_segment_sizes, Brief);
PRINT_WASM_INSTANCE_FIELD(dropped_elem_segments, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(dropped_elem_segments, Brief);
PRINT_WASM_INSTANCE_FIELD(hook_on_function_call_address, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(hook_on_function_call_address, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(tiering_budget_array, to_void_ptr); PRINT_WASM_INSTANCE_FIELD(tiering_budget_array, to_void_ptr);
PRINT_WASM_INSTANCE_FIELD(break_on_entry, static_cast<int>); PRINT_WASM_INSTANCE_FIELD(break_on_entry, static_cast<int>);
......
...@@ -621,6 +621,20 @@ void ByteArray::set_int(int offset, int value) { ...@@ -621,6 +621,20 @@ void ByteArray::set_int(int offset, int value) {
WriteField<int>(kHeaderSize + offset, value); WriteField<int>(kHeaderSize + offset, value);
} }
// Reads a pointer-sized field at byte {offset} into this ByteArray's payload
// and decodes it from its sandboxed (encoded) representation back to a full
// Address.
Address ByteArray::get_sandboxed_pointer(int offset) const {
// The whole pointer-sized field must lie within the array's payload.
DCHECK_GE(offset, 0);
DCHECK_LE(offset + sizeof(Address), length());
// NOTE(review): the pointer-compression cage base is passed as the decoding
// base -- assumes sandboxed pointers are encoded relative to it; confirm
// against ReadSandboxedPointerField.
PtrComprCageBase sandbox_base = GetPtrComprCageBase(*this);
return ReadSandboxedPointerField(kHeaderSize + offset, sandbox_base);
}
// Encodes {value} as a sandboxed pointer and stores it in the pointer-sized
// field at byte {offset} into this ByteArray's payload. Counterpart of
// get_sandboxed_pointer.
void ByteArray::set_sandboxed_pointer(int offset, Address value) {
// The whole pointer-sized field must lie within the array's payload.
DCHECK_GE(offset, 0);
DCHECK_LE(offset + sizeof(Address), length());
// NOTE(review): uses the same cage base as the read path so that
// encode/decode round-trip -- confirm against WriteSandboxedPointerField.
PtrComprCageBase sandbox_base = GetPtrComprCageBase(*this);
WriteSandboxedPointerField(kHeaderSize + offset, sandbox_base, value);
}
void ByteArray::copy_in(int offset, const byte* buffer, int slice_length) { void ByteArray::copy_in(int offset, const byte* buffer, int slice_length) {
DCHECK_GE(offset, 0); DCHECK_GE(offset, 0);
DCHECK_GE(slice_length, 0); DCHECK_GE(slice_length, 0);
...@@ -673,8 +687,10 @@ FixedIntegerArray<T> FixedIntegerArray<T>::cast(Object object) { ...@@ -673,8 +687,10 @@ FixedIntegerArray<T> FixedIntegerArray<T>::cast(Object object) {
template <typename T> template <typename T>
Handle<FixedIntegerArray<T>> FixedIntegerArray<T>::New( Handle<FixedIntegerArray<T>> FixedIntegerArray<T>::New(
Isolate* isolate, int length, AllocationType allocation) { Isolate* isolate, int length, AllocationType allocation) {
int byte_length;
CHECK(!base::bits::SignedMulOverflow32(length, sizeof(T), &byte_length));
return Handle<FixedIntegerArray<T>>::cast( return Handle<FixedIntegerArray<T>>::cast(
isolate->factory()->NewByteArray(length * sizeof(T), allocation)); isolate->factory()->NewByteArray(byte_length, allocation));
} }
template <typename T> template <typename T>
...@@ -711,8 +727,10 @@ PodArray<T> PodArray<T>::cast(Object object) { ...@@ -711,8 +727,10 @@ PodArray<T> PodArray<T>::cast(Object object) {
template <class T> template <class T>
Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length, Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length,
AllocationType allocation) { AllocationType allocation) {
int byte_length;
CHECK(!base::bits::SignedMulOverflow32(length, sizeof(T), &byte_length));
return Handle<PodArray<T>>::cast( return Handle<PodArray<T>>::cast(
isolate->factory()->NewByteArray(length * sizeof(T), allocation)); isolate->factory()->NewByteArray(byte_length, allocation));
} }
template <class T> template <class T>
......
...@@ -531,6 +531,9 @@ class ByteArray : public TorqueGeneratedByteArray<ByteArray, FixedArrayBase> { ...@@ -531,6 +531,9 @@ class ByteArray : public TorqueGeneratedByteArray<ByteArray, FixedArrayBase> {
inline int get_int(int offset) const; inline int get_int(int offset) const;
inline void set_int(int offset, int value); inline void set_int(int offset, int value);
inline Address get_sandboxed_pointer(int offset) const;
inline void set_sandboxed_pointer(int offset, Address value);
// Copy in / copy out whole byte slices. // Copy in / copy out whole byte slices.
inline void copy_out(int index, byte* buffer, int slice_length); inline void copy_out(int index, byte* buffer, int slice_length);
inline void copy_in(int index, const byte* buffer, int slice_length); inline void copy_in(int index, const byte* buffer, int slice_length);
...@@ -617,6 +620,10 @@ using FixedInt32Array = FixedIntegerArray<int32_t>; ...@@ -617,6 +620,10 @@ using FixedInt32Array = FixedIntegerArray<int32_t>;
using FixedUInt32Array = FixedIntegerArray<uint32_t>; using FixedUInt32Array = FixedIntegerArray<uint32_t>;
using FixedInt64Array = FixedIntegerArray<int64_t>; using FixedInt64Array = FixedIntegerArray<int64_t>;
using FixedUInt64Array = FixedIntegerArray<uint64_t>; using FixedUInt64Array = FixedIntegerArray<uint64_t>;
// Use with care! Raw addresses on the heap are not safe in combination with
// the sandbox. However, this can for example be used to store sandboxed
// pointers, which is safe.
using FixedAddressArray = FixedIntegerArray<Address>;
// Wrapper class for ByteArray which can store arbitrary C++ classes, as long // Wrapper class for ByteArray which can store arbitrary C++ classes, as long
// as they can be copied with memcpy. // as they can be copied with memcpy.
......
...@@ -741,19 +741,20 @@ RUNTIME_FUNCTION(Runtime_WasmArrayNewSegment) { ...@@ -741,19 +741,20 @@ RUNTIME_FUNCTION(Runtime_WasmArrayNewSegment) {
DCHECK_EQ(length_in_bytes / element_size, length); DCHECK_EQ(length_in_bytes / element_size, length);
if (!base::IsInBounds<uint32_t>( if (!base::IsInBounds<uint32_t>(
offset, length_in_bytes, offset, length_in_bytes,
instance->data_segment_sizes()[segment_index])) { instance->data_segment_sizes().get(segment_index))) {
return ThrowWasmError(isolate, return ThrowWasmError(isolate,
MessageTemplate::kWasmTrapDataSegmentOutOfBounds); MessageTemplate::kWasmTrapDataSegmentOutOfBounds);
} }
Address source = instance->data_segment_starts()[segment_index] + offset; Address source =
instance->data_segment_starts().get(segment_index) + offset;
return *isolate->factory()->NewWasmArrayFromMemory(length, rtt, source); return *isolate->factory()->NewWasmArrayFromMemory(length, rtt, source);
} else { } else {
const wasm::WasmElemSegment* elem_segment = const wasm::WasmElemSegment* elem_segment =
&instance->module()->elem_segments[segment_index]; &instance->module()->elem_segments[segment_index];
if (!base::IsInBounds<size_t>( if (!base::IsInBounds<size_t>(
offset, length, offset, length,
instance->dropped_elem_segments()[segment_index] instance->dropped_elem_segments().get(segment_index)
? 0 ? 0
: elem_segment->entries.size())) { : elem_segment->entries.size())) {
return ThrowWasmError( return ThrowWasmError(
......
...@@ -2491,18 +2491,20 @@ class LiftoffCompiler { ...@@ -2491,18 +2491,20 @@ class LiftoffCompiler {
LiftoffRegList* pinned, uint32_t* offset) { LiftoffRegList* pinned, uint32_t* offset) {
Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp(); Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
if (global->mutability && global->imported) { if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize, LOAD_TAGGED_PTR_INSTANCE_FIELD(addr, ImportedMutableGlobals, *pinned);
*pinned); int field_offset =
__ Load(LiftoffRegister(addr), addr, no_reg, wasm::ObjectAccess::ElementOffsetInTaggedFixedAddressArray(
global->index * sizeof(Address), kPointerLoadType); global->index);
__ Load(LiftoffRegister(addr), addr, no_reg, field_offset,
kPointerLoadType);
*offset = 0; *offset = 0;
} else { } else {
LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize, *pinned); LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize, *pinned);
*offset = global->offset;
}
#ifdef V8_ENABLE_SANDBOX #ifdef V8_ENABLE_SANDBOX
__ DecodeSandboxedPointer(addr); __ DecodeSandboxedPointer(addr);
#endif #endif
*offset = global->offset;
}
return addr; return addr;
} }
...@@ -2524,12 +2526,14 @@ class LiftoffCompiler { ...@@ -2524,12 +2526,14 @@ class LiftoffCompiler {
Register imported_mutable_globals = Register imported_mutable_globals =
pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp(); pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
LOAD_INSTANCE_FIELD(imported_mutable_globals, ImportedMutableGlobals, LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_mutable_globals,
kSystemPointerSize, *pinned); ImportedMutableGlobals, *pinned);
*offset = imported_mutable_globals; *offset = imported_mutable_globals;
int field_offset =
wasm::ObjectAccess::ElementOffsetInTaggedFixedAddressArray(
global->index);
__ Load(LiftoffRegister(*offset), imported_mutable_globals, no_reg, __ Load(LiftoffRegister(*offset), imported_mutable_globals, no_reg,
global->index * sizeof(Address), field_offset, LoadType::kI32Load);
kSystemPointerSize == 4 ? LoadType::kI32Load : LoadType::kI64Load);
__ emit_i32_shli(*offset, *offset, kTaggedSizeLog2); __ emit_i32_shli(*offset, *offset, kTaggedSizeLog2);
__ emit_i32_addi(*offset, *offset, __ emit_i32_addi(*offset, *offset,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)); wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0));
...@@ -5259,14 +5263,15 @@ class LiftoffCompiler { ...@@ -5259,14 +5263,15 @@ class LiftoffCompiler {
Register seg_size_array = Register seg_size_array =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(seg_size_array, DataSegmentSizes, kSystemPointerSize, LOAD_TAGGED_PTR_INSTANCE_FIELD(seg_size_array, DataSegmentSizes, pinned);
pinned);
LiftoffRegister seg_index = LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)); pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Scale the seg_index for the array access. // Scale the seg_index for the array access.
__ LoadConstant(seg_index, __ LoadConstant(
WasmValue(imm.index << value_kind_size_log2(kI32))); seg_index,
WasmValue(wasm::ObjectAccess::ElementOffsetInTaggedFixedUInt32Array(
imm.index)));
// Set the length of the segment to '0' to drop it. // Set the length of the segment to '0' to drop it.
LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
...@@ -5396,12 +5401,15 @@ class LiftoffCompiler { ...@@ -5396,12 +5401,15 @@ class LiftoffCompiler {
LiftoffRegList pinned; LiftoffRegList pinned;
Register dropped_elem_segments = Register dropped_elem_segments =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(dropped_elem_segments, DroppedElemSegments, LOAD_TAGGED_PTR_INSTANCE_FIELD(dropped_elem_segments, DroppedElemSegments,
kSystemPointerSize, pinned); pinned);
LiftoffRegister seg_index = LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)); pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(seg_index, WasmValue(imm.index)); __ LoadConstant(
seg_index,
WasmValue(wasm::ObjectAccess::ElementOffsetInTaggedFixedUInt8Array(
imm.index)));
// Mark the segment as dropped by setting its value in the dropped // Mark the segment as dropped by setting its value in the dropped
// segments list to 1. // segments list to 1.
...@@ -7057,10 +7065,12 @@ class LiftoffCompiler { ...@@ -7057,10 +7065,12 @@ class LiftoffCompiler {
Register target = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); Register target = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
Register imported_targets = tmp; Register imported_targets = tmp;
LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets, LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
kSystemPointerSize, pinned); pinned);
__ Load(LiftoffRegister(target), imported_targets, no_reg, __ Load(
imm.index * sizeof(Address), kPointerLoadType); LiftoffRegister(target), imported_targets, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedAddressArray(imm.index),
kPointerLoadType);
Register imported_function_refs = tmp; Register imported_function_refs = tmp;
LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_function_refs, LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_function_refs,
......
...@@ -264,7 +264,7 @@ void ConstantExpressionInterface::ArrayNewSegment( ...@@ -264,7 +264,7 @@ void ConstantExpressionInterface::ArrayNewSegment(
} }
Address source = Address source =
instance_->data_segment_starts()[segment_imm.index] + offset; instance_->data_segment_starts().get(segment_imm.index) + offset;
Handle<WasmArray> array_value = isolate_->factory()->NewWasmArrayFromMemory( Handle<WasmArray> array_value = isolate_->factory()->NewWasmArrayFromMemory(
length, Handle<Map>::cast(rtt.runtime_value.to_ref()), source); length, Handle<Map>::cast(rtt.runtime_value.to_ref()), source);
result->runtime_value = WasmValue(array_value, result_type); result->runtime_value = WasmValue(array_value, result_type);
......
...@@ -1420,7 +1420,6 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject( ...@@ -1420,7 +1420,6 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
if (global.mutability) { if (global.mutability) {
DCHECK_LT(global.index, module_->num_imported_mutable_globals); DCHECK_LT(global.index, module_->num_imported_mutable_globals);
Handle<Object> buffer; Handle<Object> buffer;
Address address_or_offset;
if (global.type.is_reference()) { if (global.type.is_reference()) {
static_assert(sizeof(global_object->offset()) <= sizeof(Address), static_assert(sizeof(global_object->offset()) <= sizeof(Address),
"The offset into the globals buffer does not fit into " "The offset into the globals buffer does not fit into "
...@@ -1428,17 +1427,19 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject( ...@@ -1428,17 +1427,19 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
buffer = handle(global_object->tagged_buffer(), isolate_); buffer = handle(global_object->tagged_buffer(), isolate_);
// For externref globals we use a relative offset, not an absolute // For externref globals we use a relative offset, not an absolute
// address. // address.
address_or_offset = static_cast<Address>(global_object->offset()); instance->imported_mutable_globals().set_int(
global.index * kSystemPointerSize, global_object->offset());
} else { } else {
buffer = handle(global_object->untagged_buffer(), isolate_); buffer = handle(global_object->untagged_buffer(), isolate_);
// It is safe in this case to store the raw pointer to the buffer // It is safe in this case to store the raw pointer to the buffer
// since the backing store of the JSArrayBuffer will not be // since the backing store of the JSArrayBuffer will not be
// relocated. // relocated.
address_or_offset = reinterpret_cast<Address>(raw_buffer_ptr( Address address = reinterpret_cast<Address>(raw_buffer_ptr(
Handle<JSArrayBuffer>::cast(buffer), global_object->offset())); Handle<JSArrayBuffer>::cast(buffer), global_object->offset()));
instance->imported_mutable_globals().set_sandboxed_pointer(
global.index * kSystemPointerSize, address);
} }
instance->imported_mutable_globals_buffers().set(global.index, *buffer); instance->imported_mutable_globals_buffers().set(global.index, *buffer);
instance->imported_mutable_globals()[global.index] = address_or_offset;
return true; return true;
} }
...@@ -1865,16 +1866,15 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) { ...@@ -1865,16 +1866,15 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
FixedArray::cast(buffers_array->get(global.index)), isolate_); FixedArray::cast(buffers_array->get(global.index)), isolate_);
// For externref globals we store the relative offset in the // For externref globals we store the relative offset in the
// imported_mutable_globals array instead of an absolute address. // imported_mutable_globals array instead of an absolute address.
Address addr = instance->imported_mutable_globals()[global.index]; offset = instance->imported_mutable_globals().get_int(
DCHECK_LE(addr, static_cast<Address>( global.index * kSystemPointerSize);
std::numeric_limits<uint32_t>::max()));
offset = static_cast<uint32_t>(addr);
} else { } else {
untagged_buffer = untagged_buffer =
handle(JSArrayBuffer::cast(buffers_array->get(global.index)), handle(JSArrayBuffer::cast(buffers_array->get(global.index)),
isolate_); isolate_);
Address global_addr = Address global_addr =
instance->imported_mutable_globals()[global.index]; instance->imported_mutable_globals().get_sandboxed_pointer(
global.index * kSystemPointerSize);
size_t buffer_size = untagged_buffer->byte_length(); size_t buffer_size = untagged_buffer->byte_length();
Address backing_store = Address backing_store =
...@@ -2026,7 +2026,7 @@ base::Optional<MessageTemplate> LoadElemSegmentImpl( ...@@ -2026,7 +2026,7 @@ base::Optional<MessageTemplate> LoadElemSegmentImpl(
} }
if (!base::IsInBounds<uint64_t>( if (!base::IsInBounds<uint64_t>(
src, count, src, count,
instance->dropped_elem_segments()[segment_index] == 0 instance->dropped_elem_segments().get(segment_index) == 0
? elem_segment.entries.size() ? elem_segment.entries.size()
: 0)) { : 0)) {
return {MessageTemplate::kWasmTrapElementSegmentOutOfBounds}; return {MessageTemplate::kWasmTrapElementSegmentOutOfBounds};
...@@ -2082,7 +2082,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) { ...@@ -2082,7 +2082,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
table_index, segment_index, dst, src, count); table_index, segment_index, dst, src, count);
// Set the active segments to being already dropped, since table.init on // Set the active segments to being already dropped, since table.init on
// a dropped passive segment and an active segment have the same behavior. // a dropped passive segment and an active segment have the same behavior.
instance->dropped_elem_segments()[segment_index] = 1; instance->dropped_elem_segments().set(segment_index, 1);
if (opt_error.has_value()) { if (opt_error.has_value()) {
thrower_->RuntimeError( thrower_->RuntimeError(
"%s", MessageFormatter::TemplateString(opt_error.value())); "%s", MessageFormatter::TemplateString(opt_error.value()));
......
...@@ -28,6 +28,21 @@ class ObjectAccess : public AllStatic { ...@@ -28,6 +28,21 @@ class ObjectAccess : public AllStatic {
return ToTagged(FixedArray::OffsetOfElementAt(index)); return ToTagged(FixedArray::OffsetOfElementAt(index));
} }
// Get the offset into a fixed uint8 array for a given {index}.
static constexpr int ElementOffsetInTaggedFixedUInt8Array(int index) {
return ToTagged(FixedUInt8Array::OffsetOfElementAt(index));
}
// Get the offset into a fixed uint32 array for a given {index}.
static constexpr int ElementOffsetInTaggedFixedUInt32Array(int index) {
return ToTagged(FixedUInt32Array::OffsetOfElementAt(index));
}
// Get the offset into a fixed address array for a given {index}.
static constexpr int ElementOffsetInTaggedFixedAddressArray(int index) {
return ToTagged(FixedAddressArray::OffsetOfElementAt(index));
}
// Get the offset of the context stored in a {JSFunction} object. // Get the offset of the context stored in a {JSFunction} object.
static constexpr int ContextOffsetInTaggedJSFunction() { static constexpr int ContextOffsetInTaggedJSFunction() {
return ToTagged(JSFunction::kContextOffset); return ToTagged(JSFunction::kContextOffset);
......
...@@ -488,11 +488,11 @@ int32_t memory_init_wrapper(Address data) { ...@@ -488,11 +488,11 @@ int32_t memory_init_wrapper(Address data) {
uint64_t mem_size = instance.memory_size(); uint64_t mem_size = instance.memory_size();
if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds; if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
uint32_t seg_size = instance.data_segment_sizes()[seg_index]; uint32_t seg_size = instance.data_segment_sizes().get(seg_index);
if (!base::IsInBounds<uint32_t>(src, size, seg_size)) return kOutOfBounds; if (!base::IsInBounds<uint32_t>(src, size, seg_size)) return kOutOfBounds;
byte* seg_start = byte* seg_start =
reinterpret_cast<byte*>(instance.data_segment_starts()[seg_index]); reinterpret_cast<byte*>(instance.data_segment_starts().get(seg_index));
std::memcpy(EffectiveAddress(instance, dst), seg_start + src, size); std::memcpy(EffectiveAddress(instance, dst), seg_start + src, size);
return kSuccess; return kSuccess;
} }
......
...@@ -207,12 +207,12 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, old_allocation_top_address, Address*, ...@@ -207,12 +207,12 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, old_allocation_top_address, Address*,
kOldAllocationTopAddressOffset) kOldAllocationTopAddressOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, isorecursive_canonical_types, PRIMITIVE_ACCESSORS(WasmInstanceObject, isorecursive_canonical_types,
const uint32_t*, kIsorecursiveCanonicalTypesOffset) const uint32_t*, kIsorecursiveCanonicalTypesOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, imported_function_targets, Address*,
kImportedFunctionTargetsOffset)
SANDBOXED_POINTER_ACCESSORS(WasmInstanceObject, globals_start, byte*, SANDBOXED_POINTER_ACCESSORS(WasmInstanceObject, globals_start, byte*,
kGlobalsStartOffset) kGlobalsStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, imported_mutable_globals, Address*, ACCESSORS(WasmInstanceObject, imported_mutable_globals, ByteArray,
kImportedMutableGlobalsOffset) kImportedMutableGlobalsOffset)
ACCESSORS(WasmInstanceObject, imported_function_targets, FixedAddressArray,
kImportedFunctionTargetsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_size, uint32_t, PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_size, uint32_t,
kIndirectFunctionTableSizeOffset) kIndirectFunctionTableSizeOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_sig_ids, PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_sig_ids,
...@@ -221,16 +221,16 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_targets, ...@@ -221,16 +221,16 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_targets,
Address*, kIndirectFunctionTableTargetsOffset) Address*, kIndirectFunctionTableTargetsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_start, Address, PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_start, Address,
kJumpTableStartOffset) kJumpTableStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_starts, Address*,
kDataSegmentStartsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_sizes, uint32_t*,
kDataSegmentSizesOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_elem_segments, byte*,
kDroppedElemSegmentsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, hook_on_function_call_address, Address, PRIMITIVE_ACCESSORS(WasmInstanceObject, hook_on_function_call_address, Address,
kHookOnFunctionCallAddressOffset) kHookOnFunctionCallAddressOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, tiering_budget_array, uint32_t*, PRIMITIVE_ACCESSORS(WasmInstanceObject, tiering_budget_array, uint32_t*,
kTieringBudgetArrayOffset) kTieringBudgetArrayOffset)
ACCESSORS(WasmInstanceObject, data_segment_starts, FixedAddressArray,
kDataSegmentStartsOffset)
ACCESSORS(WasmInstanceObject, data_segment_sizes, FixedUInt32Array,
kDataSegmentSizesOffset)
ACCESSORS(WasmInstanceObject, dropped_elem_segments, FixedUInt8Array,
kDroppedElemSegmentsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, break_on_entry, uint8_t, PRIMITIVE_ACCESSORS(WasmInstanceObject, break_on_entry, uint8_t,
kBreakOnEntryOffset) kBreakOnEntryOffset)
...@@ -253,8 +253,6 @@ ACCESSORS(WasmInstanceObject, imported_function_refs, FixedArray, ...@@ -253,8 +253,6 @@ ACCESSORS(WasmInstanceObject, imported_function_refs, FixedArray,
kImportedFunctionRefsOffset) kImportedFunctionRefsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray, OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray,
kIndirectFunctionTableRefsOffset) kIndirectFunctionTableRefsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, tags_table, FixedArray, kTagsTableOffset) OPTIONAL_ACCESSORS(WasmInstanceObject, tags_table, FixedArray, kTagsTableOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_internal_functions, FixedArray, OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_internal_functions, FixedArray,
kWasmInternalFunctionsOffset) kWasmInternalFunctionsOffset)
......
...@@ -43,47 +43,6 @@ using WasmModule = wasm::WasmModule; ...@@ -43,47 +43,6 @@ using WasmModule = wasm::WasmModule;
namespace { namespace {
// Manages the natively-allocated memory for a WasmInstanceObject. Since
// an instance finalizer is not guaranteed to run upon isolate shutdown,
// we must use a Managed<WasmInstanceNativeAllocations> to guarantee
// it is freed.
class WasmInstanceNativeAllocations {
public:
WasmInstanceNativeAllocations(Handle<WasmInstanceObject> instance,
size_t num_imported_functions,
size_t num_imported_mutable_globals,
size_t num_data_segments,
size_t num_elem_segments)
: imported_function_targets_(new Address[num_imported_functions]),
imported_mutable_globals_(new Address[num_imported_mutable_globals]),
data_segment_starts_(new Address[num_data_segments]),
data_segment_sizes_(new uint32_t[num_data_segments]),
dropped_elem_segments_(new uint8_t[num_elem_segments]) {
instance->set_imported_function_targets(imported_function_targets_.get());
instance->set_imported_mutable_globals(imported_mutable_globals_.get());
instance->set_data_segment_starts(data_segment_starts_.get());
instance->set_data_segment_sizes(data_segment_sizes_.get());
instance->set_dropped_elem_segments(dropped_elem_segments_.get());
}
private:
const std::unique_ptr<Address[]> imported_function_targets_;
const std::unique_ptr<Address[]> imported_mutable_globals_;
const std::unique_ptr<Address[]> data_segment_starts_;
const std::unique_ptr<uint32_t[]> data_segment_sizes_;
const std::unique_ptr<uint8_t[]> dropped_elem_segments_;
};
size_t EstimateNativeAllocationsSize(const WasmModule* module) {
size_t estimate =
sizeof(WasmInstanceNativeAllocations) +
(1 * kSystemPointerSize * module->num_imported_mutable_globals) +
(2 * kSystemPointerSize * module->num_imported_functions) +
((kSystemPointerSize + sizeof(uint32_t) + sizeof(uint8_t)) *
module->num_declared_data_segments);
return estimate;
}
enum DispatchTableElements : int { enum DispatchTableElements : int {
kDispatchTableInstanceOffset, kDispatchTableInstanceOffset,
kDispatchTableIndexOffset, kDispatchTableIndexOffset,
...@@ -1193,8 +1152,8 @@ void ImportedFunctionEntry::SetWasmToJs( ...@@ -1193,8 +1152,8 @@ void ImportedFunctionEntry::SetWasmToJs(
Handle<WasmApiFunctionRef> ref = Handle<WasmApiFunctionRef> ref =
isolate->factory()->NewWasmApiFunctionRef(callable, suspend, instance_); isolate->factory()->NewWasmApiFunctionRef(callable, suspend, instance_);
instance_->imported_function_refs().set(index_, *ref); instance_->imported_function_refs().set(index_, *ref);
instance_->imported_function_targets()[index_] = instance_->imported_function_targets().set(
wasm_to_js_wrapper->instruction_start(); index_, wasm_to_js_wrapper->instruction_start());
} }
void ImportedFunctionEntry::SetWasmToWasm(WasmInstanceObject instance, void ImportedFunctionEntry::SetWasmToWasm(WasmInstanceObject instance,
...@@ -1203,7 +1162,7 @@ void ImportedFunctionEntry::SetWasmToWasm(WasmInstanceObject instance, ...@@ -1203,7 +1162,7 @@ void ImportedFunctionEntry::SetWasmToWasm(WasmInstanceObject instance,
", target=0x%" PRIxPTR "}\n", ", target=0x%" PRIxPTR "}\n",
instance_->ptr(), index_, instance.ptr(), call_target); instance_->ptr(), index_, instance.ptr(), call_target);
instance_->imported_function_refs().set(index_, instance); instance_->imported_function_refs().set(index_, instance);
instance_->imported_function_targets()[index_] = call_target; instance_->imported_function_targets().set(index_, call_target);
} }
// Returns an empty Object() if no callable is available, a JSReceiver // Returns an empty Object() if no callable is available, a JSReceiver
...@@ -1223,7 +1182,7 @@ Object ImportedFunctionEntry::object_ref() { ...@@ -1223,7 +1182,7 @@ Object ImportedFunctionEntry::object_ref() {
} }
Address ImportedFunctionEntry::target() { Address ImportedFunctionEntry::target() {
return instance_->imported_function_targets()[index_]; return instance_->imported_function_targets().get(index_);
} }
// static // static
...@@ -1266,17 +1225,35 @@ Handle<WasmInstanceObject> WasmInstanceObject::New( ...@@ -1266,17 +1225,35 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
WasmInstanceObject::cast(*instance_object), isolate); WasmInstanceObject::cast(*instance_object), isolate);
instance->clear_padding(); instance->clear_padding();
// Initialize the imported function arrays.
auto module = module_object->module(); auto module = module_object->module();
auto num_imported_functions = module->num_imported_functions; auto num_imported_functions = module->num_imported_functions;
auto num_imported_mutable_globals = module->num_imported_mutable_globals; Handle<FixedAddressArray> imported_function_targets =
auto num_data_segments = module->num_declared_data_segments; FixedAddressArray::New(isolate, num_imported_functions);
size_t native_allocations_size = EstimateNativeAllocationsSize(module); instance->set_imported_function_targets(*imported_function_targets);
auto native_allocations = Managed<WasmInstanceNativeAllocations>::Allocate(
isolate, native_allocations_size, instance, num_imported_functions, int num_imported_mutable_globals = module->num_imported_mutable_globals;
num_imported_mutable_globals, num_data_segments, // The imported_mutable_globals is essentially a FixedAddressArray (storing
module->elem_segments.size()); // sandboxed pointers), but some entries (the indices for reference-type
instance->set_managed_native_allocations(*native_allocations); // globals) are accessed as 32-bit integers which is more convenient with a
// raw ByteArray.
Handle<ByteArray> imported_mutable_globals =
FixedAddressArray::New(isolate, num_imported_mutable_globals);
instance->set_imported_mutable_globals(*imported_mutable_globals);
int num_data_segments = module->num_declared_data_segments;
Handle<FixedAddressArray> data_segment_starts =
FixedAddressArray::New(isolate, num_data_segments);
instance->set_data_segment_starts(*data_segment_starts);
Handle<FixedUInt32Array> data_segment_sizes =
FixedUInt32Array::New(isolate, num_data_segments);
instance->set_data_segment_sizes(*data_segment_sizes);
int num_elem_segments = static_cast<int>(module->elem_segments.size());
Handle<FixedUInt8Array> dropped_elem_segments =
FixedUInt8Array::New(isolate, num_elem_segments);
instance->set_dropped_elem_segments(*dropped_elem_segments);
Handle<FixedArray> imported_function_refs = Handle<FixedArray> imported_function_refs =
isolate->factory()->NewFixedArray(num_imported_functions); isolate->factory()->NewFixedArray(num_imported_functions);
...@@ -1345,18 +1322,18 @@ void WasmInstanceObject::InitDataSegmentArrays( ...@@ -1345,18 +1322,18 @@ void WasmInstanceObject::InitDataSegmentArrays(
// instructions). // instructions).
DCHECK(num_data_segments == 0 || DCHECK(num_data_segments == 0 ||
num_data_segments == module->data_segments.size()); num_data_segments == module->data_segments.size());
for (size_t i = 0; i < num_data_segments; ++i) { for (uint32_t i = 0; i < num_data_segments; ++i) {
const wasm::WasmDataSegment& segment = module->data_segments[i]; const wasm::WasmDataSegment& segment = module->data_segments[i];
// Initialize the pointer and size of passive segments. // Initialize the pointer and size of passive segments.
auto source_bytes = wire_bytes.SubVector(segment.source.offset(), auto source_bytes = wire_bytes.SubVector(segment.source.offset(),
segment.source.end_offset()); segment.source.end_offset());
instance->data_segment_starts()[i] = instance->data_segment_starts().set(
reinterpret_cast<Address>(source_bytes.begin()); i, reinterpret_cast<Address>(source_bytes.begin()));
// Set the active segments to being already dropped, since memory.init on // Set the active segments to being already dropped, since memory.init on
// a dropped passive segment and an active segment have the same // a dropped passive segment and an active segment have the same
// behavior. // behavior.
instance->data_segment_sizes()[i] = instance->data_segment_sizes().set(
segment.active ? 0 : source_bytes.length(); static_cast<int>(i), segment.active ? 0 : source_bytes.length());
} }
} }
...@@ -1366,18 +1343,18 @@ void WasmInstanceObject::InitElemSegmentArrays( ...@@ -1366,18 +1343,18 @@ void WasmInstanceObject::InitElemSegmentArrays(
auto module = module_object->module(); auto module = module_object->module();
auto num_elem_segments = module->elem_segments.size(); auto num_elem_segments = module->elem_segments.size();
for (size_t i = 0; i < num_elem_segments; ++i) { for (size_t i = 0; i < num_elem_segments; ++i) {
instance->dropped_elem_segments()[i] = instance->dropped_elem_segments().set(
module->elem_segments[i].status == static_cast<int>(i), module->elem_segments[i].status ==
wasm::WasmElemSegment::kStatusDeclarative wasm::WasmElemSegment::kStatusDeclarative
? 1 ? 1
: 0; : 0);
} }
} }
Address WasmInstanceObject::GetCallTarget(uint32_t func_index) { Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
wasm::NativeModule* native_module = module_object().native_module(); wasm::NativeModule* native_module = module_object().native_module();
if (func_index < native_module->num_imported_functions()) { if (func_index < native_module->num_imported_functions()) {
return imported_function_targets()[func_index]; return imported_function_targets().get(func_index);
} }
return jump_table_start() + return jump_table_start() +
JumpTableOffset(native_module->module(), func_index); JumpTableOffset(native_module->module(), func_index);
...@@ -1602,7 +1579,8 @@ uint8_t* WasmInstanceObject::GetGlobalStorage( ...@@ -1602,7 +1579,8 @@ uint8_t* WasmInstanceObject::GetGlobalStorage(
DCHECK(!global.type.is_reference()); DCHECK(!global.type.is_reference());
if (global.mutability && global.imported) { if (global.mutability && global.imported) {
return reinterpret_cast<byte*>( return reinterpret_cast<byte*>(
instance->imported_mutable_globals()[global.index]); instance->imported_mutable_globals().get_sandboxed_pointer(
global.index * kSystemPointerSize));
} else { } else {
return instance->globals_start() + global.offset; return instance->globals_start() + global.offset;
} }
...@@ -1619,7 +1597,7 @@ WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance, ...@@ -1619,7 +1597,7 @@ WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
FixedArray::cast( FixedArray::cast(
instance->imported_mutable_globals_buffers().get(global.index)), instance->imported_mutable_globals_buffers().get(global.index)),
isolate); isolate);
Address idx = instance->imported_mutable_globals()[global.index]; Address idx = instance->imported_mutable_globals().get(global.index);
DCHECK_LE(idx, std::numeric_limits<uint32_t>::max()); DCHECK_LE(idx, std::numeric_limits<uint32_t>::max());
return {buffer, static_cast<uint32_t>(idx)}; return {buffer, static_cast<uint32_t>(idx)};
} }
......
...@@ -342,8 +342,9 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject { ...@@ -342,8 +342,9 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(tables, FixedArray) DECL_OPTIONAL_ACCESSORS(tables, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_tables, FixedArray) DECL_OPTIONAL_ACCESSORS(indirect_function_tables, FixedArray)
DECL_ACCESSORS(imported_function_refs, FixedArray) DECL_ACCESSORS(imported_function_refs, FixedArray)
DECL_ACCESSORS(imported_mutable_globals, ByteArray)
DECL_ACCESSORS(imported_function_targets, FixedAddressArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray) DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
DECL_OPTIONAL_ACCESSORS(tags_table, FixedArray) DECL_OPTIONAL_ACCESSORS(tags_table, FixedArray)
DECL_OPTIONAL_ACCESSORS(wasm_internal_functions, FixedArray) DECL_OPTIONAL_ACCESSORS(wasm_internal_functions, FixedArray)
DECL_ACCESSORS(managed_object_maps, FixedArray) DECL_ACCESSORS(managed_object_maps, FixedArray)
...@@ -358,18 +359,16 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject { ...@@ -358,18 +359,16 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(old_allocation_limit_address, Address*) DECL_PRIMITIVE_ACCESSORS(old_allocation_limit_address, Address*)
DECL_PRIMITIVE_ACCESSORS(old_allocation_top_address, Address*) DECL_PRIMITIVE_ACCESSORS(old_allocation_top_address, Address*)
DECL_PRIMITIVE_ACCESSORS(isorecursive_canonical_types, const uint32_t*) DECL_PRIMITIVE_ACCESSORS(isorecursive_canonical_types, const uint32_t*)
DECL_PRIMITIVE_ACCESSORS(imported_function_targets, Address*)
DECL_SANDBOXED_POINTER_ACCESSORS(globals_start, byte*) DECL_SANDBOXED_POINTER_ACCESSORS(globals_start, byte*)
DECL_PRIMITIVE_ACCESSORS(imported_mutable_globals, Address*)
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_size, uint32_t) DECL_PRIMITIVE_ACCESSORS(indirect_function_table_size, uint32_t)
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_sig_ids, uint32_t*) DECL_PRIMITIVE_ACCESSORS(indirect_function_table_sig_ids, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_targets, Address*) DECL_PRIMITIVE_ACCESSORS(indirect_function_table_targets, Address*)
DECL_PRIMITIVE_ACCESSORS(jump_table_start, Address) DECL_PRIMITIVE_ACCESSORS(jump_table_start, Address)
DECL_PRIMITIVE_ACCESSORS(data_segment_starts, Address*)
DECL_PRIMITIVE_ACCESSORS(data_segment_sizes, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(dropped_elem_segments, byte*)
DECL_PRIMITIVE_ACCESSORS(hook_on_function_call_address, Address) DECL_PRIMITIVE_ACCESSORS(hook_on_function_call_address, Address)
DECL_PRIMITIVE_ACCESSORS(tiering_budget_array, uint32_t*) DECL_PRIMITIVE_ACCESSORS(tiering_budget_array, uint32_t*)
DECL_ACCESSORS(data_segment_starts, FixedAddressArray)
DECL_ACCESSORS(data_segment_sizes, FixedUInt32Array)
DECL_ACCESSORS(dropped_elem_segments, FixedUInt8Array)
DECL_PRIMITIVE_ACCESSORS(break_on_entry, uint8_t) DECL_PRIMITIVE_ACCESSORS(break_on_entry, uint8_t)
// Clear uninitialized padding space. This ensures that the snapshot content // Clear uninitialized padding space. This ensures that the snapshot content
...@@ -386,6 +385,8 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject { ...@@ -386,6 +385,8 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
/* Less than system pointer sized fields come first. */ \ /* Less than system pointer sized fields come first. */ \
V(kImportedFunctionRefsOffset, kTaggedSize) \ V(kImportedFunctionRefsOffset, kTaggedSize) \
V(kIndirectFunctionTableRefsOffset, kTaggedSize) \ V(kIndirectFunctionTableRefsOffset, kTaggedSize) \
V(kImportedMutableGlobalsOffset, kTaggedSize) \
V(kImportedFunctionTargetsOffset, kTaggedSize) \
V(kIndirectFunctionTableSizeOffset, kUInt32Size) \ V(kIndirectFunctionTableSizeOffset, kUInt32Size) \
/* Optional padding to align system pointer size fields */ \ /* Optional padding to align system pointer size fields */ \
V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \ V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
...@@ -393,11 +394,9 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject { ...@@ -393,11 +394,9 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kMemorySizeOffset, kSizetSize) \ V(kMemorySizeOffset, kSizetSize) \
V(kStackLimitAddressOffset, kSystemPointerSize) \ V(kStackLimitAddressOffset, kSystemPointerSize) \
V(kIsorecursiveCanonicalTypesOffset, kSystemPointerSize) \ V(kIsorecursiveCanonicalTypesOffset, kSystemPointerSize) \
V(kImportedFunctionTargetsOffset, kSystemPointerSize) \
V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \ V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
V(kIndirectFunctionTableSigIdsOffset, kSystemPointerSize) \ V(kIndirectFunctionTableSigIdsOffset, kSystemPointerSize) \
V(kGlobalsStartOffset, kSystemPointerSize) \ V(kGlobalsStartOffset, kSystemPointerSize) \
V(kImportedMutableGlobalsOffset, kSystemPointerSize) \
V(kIsolateRootOffset, kSystemPointerSize) \ V(kIsolateRootOffset, kSystemPointerSize) \
V(kJumpTableStartOffset, kSystemPointerSize) \ V(kJumpTableStartOffset, kSystemPointerSize) \
/* End of often-accessed fields. */ \ /* End of often-accessed fields. */ \
...@@ -407,12 +406,12 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject { ...@@ -407,12 +406,12 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kOldAllocationLimitAddressOffset, kSystemPointerSize) \ V(kOldAllocationLimitAddressOffset, kSystemPointerSize) \
V(kOldAllocationTopAddressOffset, kSystemPointerSize) \ V(kOldAllocationTopAddressOffset, kSystemPointerSize) \
V(kRealStackLimitAddressOffset, kSystemPointerSize) \ V(kRealStackLimitAddressOffset, kSystemPointerSize) \
V(kDataSegmentStartsOffset, kSystemPointerSize) \
V(kDataSegmentSizesOffset, kSystemPointerSize) \
V(kDroppedElemSegmentsOffset, kSystemPointerSize) \
V(kHookOnFunctionCallAddressOffset, kSystemPointerSize) \ V(kHookOnFunctionCallAddressOffset, kSystemPointerSize) \
V(kTieringBudgetArrayOffset, kSystemPointerSize) \ V(kTieringBudgetArrayOffset, kSystemPointerSize) \
/* Less than system pointer size aligned fields are below. */ \ /* Less than system pointer size aligned fields are below. */ \
V(kDataSegmentStartsOffset, kTaggedSize) \
V(kDataSegmentSizesOffset, kTaggedSize) \
V(kDroppedElemSegmentsOffset, kTaggedSize) \
V(kModuleObjectOffset, kTaggedSize) \ V(kModuleObjectOffset, kTaggedSize) \
V(kExportsObjectOffset, kTaggedSize) \ V(kExportsObjectOffset, kTaggedSize) \
V(kNativeContextOffset, kTaggedSize) \ V(kNativeContextOffset, kTaggedSize) \
...@@ -422,7 +421,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject { ...@@ -422,7 +421,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \ V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \
V(kTablesOffset, kTaggedSize) \ V(kTablesOffset, kTaggedSize) \
V(kIndirectFunctionTablesOffset, kTaggedSize) \ V(kIndirectFunctionTablesOffset, kTaggedSize) \
V(kManagedNativeAllocationsOffset, kTaggedSize) \
V(kTagsTableOffset, kTaggedSize) \ V(kTagsTableOffset, kTaggedSize) \
V(kWasmInternalFunctionsOffset, kTaggedSize) \ V(kWasmInternalFunctionsOffset, kTaggedSize) \
V(kManagedObjectMapsOffset, kTaggedSize) \ V(kManagedObjectMapsOffset, kTaggedSize) \
...@@ -459,11 +457,15 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject { ...@@ -459,11 +457,15 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
kImportedMutableGlobalsBuffersOffset, kImportedMutableGlobalsBuffersOffset,
kTablesOffset, kTablesOffset,
kIndirectFunctionTablesOffset, kIndirectFunctionTablesOffset,
kManagedNativeAllocationsOffset,
kTagsTableOffset, kTagsTableOffset,
kWasmInternalFunctionsOffset, kWasmInternalFunctionsOffset,
kManagedObjectMapsOffset, kManagedObjectMapsOffset,
kFeedbackVectorsOffset}; kFeedbackVectorsOffset,
kImportedMutableGlobalsOffset,
kImportedFunctionTargetsOffset,
kDataSegmentStartsOffset,
kDataSegmentSizesOffset,
kDroppedElemSegmentsOffset};
const wasm::WasmModule* module(); const wasm::WasmModule* module();
......
...@@ -327,8 +327,19 @@ uint32_t TestingModuleBuilder::AddPassiveDataSegment( ...@@ -327,8 +327,19 @@ uint32_t TestingModuleBuilder::AddPassiveDataSegment(
data_segment_sizes_.push_back(bytes.length()); data_segment_sizes_.push_back(bytes.length());
// The vector pointers may have moved, so update the instance object. // The vector pointers may have moved, so update the instance object.
instance_object_->set_data_segment_starts(data_segment_starts_.data()); uint32_t size = static_cast<uint32_t>(data_segment_sizes_.size());
instance_object_->set_data_segment_sizes(data_segment_sizes_.data()); Handle<FixedAddressArray> data_segment_starts =
FixedAddressArray::New(isolate_, size);
data_segment_starts->copy_in(
0, reinterpret_cast<byte*>(data_segment_starts_.data()),
size * sizeof(Address));
instance_object_->set_data_segment_starts(*data_segment_starts);
Handle<FixedUInt32Array> data_segment_sizes =
FixedUInt32Array::New(isolate_, size);
data_segment_sizes->copy_in(
0, reinterpret_cast<byte*>(data_segment_sizes_.data()),
size * sizeof(uint32_t));
instance_object_->set_data_segment_sizes(*data_segment_sizes);
return index; return index;
} }
...@@ -347,7 +358,11 @@ uint32_t TestingModuleBuilder::AddPassiveElementSegment( ...@@ -347,7 +358,11 @@ uint32_t TestingModuleBuilder::AddPassiveElementSegment(
// The vector pointers may have moved, so update the instance object. // The vector pointers may have moved, so update the instance object.
dropped_elem_segments_.push_back(0); dropped_elem_segments_.push_back(0);
instance_object_->set_dropped_elem_segments(dropped_elem_segments_.data()); uint32_t size = static_cast<uint32_t>(dropped_elem_segments_.size());
Handle<FixedUInt8Array> dropped_elem_segments =
FixedUInt8Array::New(isolate_, size);
dropped_elem_segments->copy_in(0, dropped_elem_segments_.data(), size);
instance_object_->set_dropped_elem_segments(*dropped_elem_segments);
return index; return index;
} }
......
...@@ -1836,14 +1836,14 @@ class WasmInterpreterInternals { ...@@ -1836,14 +1836,14 @@ class WasmInterpreterInternals {
uint64_t dst = ToMemType(Pop()); uint64_t dst = ToMemType(Pop());
Address dst_addr; Address dst_addr;
uint64_t src_max = uint64_t src_max =
instance_object_->data_segment_sizes()[imm.data_segment.index]; instance_object_->data_segment_sizes().get(imm.data_segment.index);
if (!BoundsCheckMemRange(dst, &size, &dst_addr) || if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
!base::IsInBounds(src, size, src_max)) { !base::IsInBounds(src, size, src_max)) {
DoTrap(kTrapMemOutOfBounds, pc); DoTrap(kTrapMemOutOfBounds, pc);
return false; return false;
} }
Address src_addr = Address src_addr = instance_object_->data_segment_starts().get(
instance_object_->data_segment_starts()[imm.data_segment.index] + imm.data_segment.index) +
src; src;
std::memmove(reinterpret_cast<void*>(dst_addr), std::memmove(reinterpret_cast<void*>(dst_addr),
reinterpret_cast<void*>(src_addr), size); reinterpret_cast<void*>(src_addr), size);
...@@ -1856,7 +1856,7 @@ class WasmInterpreterInternals { ...@@ -1856,7 +1856,7 @@ class WasmInterpreterInternals {
// validation. // validation.
DCHECK_LT(imm.index, module()->num_declared_data_segments); DCHECK_LT(imm.index, module()->num_declared_data_segments);
*len += imm.length; *len += imm.length;
instance_object_->data_segment_sizes()[imm.index] = 0; instance_object_->data_segment_sizes().set(imm.index, 0);
return true; return true;
} }
case kExprMemoryCopy: { case kExprMemoryCopy: {
...@@ -1916,7 +1916,7 @@ class WasmInterpreterInternals { ...@@ -1916,7 +1916,7 @@ class WasmInterpreterInternals {
IndexImmediate<Decoder::kNoValidation> imm(decoder, code->at(pc + *len), IndexImmediate<Decoder::kNoValidation> imm(decoder, code->at(pc + *len),
"element segment index"); "element segment index");
*len += imm.length; *len += imm.length;
instance_object_->dropped_elem_segments()[imm.index] = 1; instance_object_->dropped_elem_segments().set(imm.index, 1);
return true; return true;
} }
case kExprTableCopy: { case kExprTableCopy: {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment