Commit 9eb278b8 authored by Jakob Gruber, committed by Commit Bot

Revert "[ia32] Remove custom MemMove function"

This reverts commit 4a9f186b.

Reason for revert: Regresses microbenchmarks. https://crbug.com/v8/8675#c5

Original change's description:
> [ia32] Remove custom MemMove function
> 
> It isn't clear whether our custom generated MemMove function provides
> any benefits over std::memmove. This is an attempt to remove it. If
> bots seem unhappy we can revert.
> 
> Bug: v8:7777,v8:8675
> Change-Id: I7f1a6e3050b6e635618593c04f7d51e448426ee2
> Reviewed-on: https://chromium-review.googlesource.com/c/1405854
> Commit-Queue: Jakob Gruber <jgruber@chromium.org>
> Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#58748}

TBR=jkummerow@chromium.org,jgruber@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: v8:7777, v8:8675
Change-Id: Ia4ad37070f433f76b1158e90835162aefe38abdd
Reviewed-on: https://chromium-review.googlesource.com/c/1407063
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58766}
parent 55decb63
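
For orientation: the bulk of the diff below re-adds a generated ia32 memmove whose first decision is the copy direction. When the destination lies below the source it copies front-to-back, otherwise back-to-front, which is what keeps overlapping moves correct. As a standalone refresher (plain C++, not V8 code; names are illustrative), the direction rule looks like this:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Copy front-to-back: safe when dst is at a lower address than src.
void CopyForward(uint8_t* dst, const uint8_t* src, size_t n) {
  for (size_t i = 0; i < n; ++i) dst[i] = src[i];
}

// Copy back-to-front: safe when dst is at a higher address than src.
void CopyBackward(uint8_t* dst, const uint8_t* src, size_t n) {
  for (size_t i = n; i-- > 0;) dst[i] = src[i];
}

int main() {
  uint8_t a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  CopyForward(a, a + 2, 6);  // shift left by two: prints 3 4 5 6 7 8 7 8
  for (int i = 0; i < 8; ++i) std::printf("%d ", a[i]);
  std::printf("\n");

  uint8_t b[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  CopyBackward(b + 2, b, 6);  // shift right by two: prints 1 2 1 2 3 4 5 6
  for (int i = 0; i < 8; ++i) std::printf("%d ", b[i]);
  std::printf("\n");
  return 0;
}

A forward copy in the second case would re-read bytes it had already overwritten, which is exactly the situation the backward path in the generated code avoids.
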
@@ -2682,6 +2682,7 @@ v8_source_set("v8_base") {
"src/ia32/assembler-ia32-inl.h",
"src/ia32/assembler-ia32.cc",
"src/ia32/assembler-ia32.h",
"src/ia32/codegen-ia32.cc",
"src/ia32/constants-ia32.h",
"src/ia32/cpu-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
......
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_IA32
#include "src/heap/factory-inl.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
// Helper functions for CreateMemMoveFunction.
#define __ ACCESS_MASM(masm)
enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
// Expects registers:
// esi - source, aligned if alignment == MOVE_ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
void MemMoveEmitMainLoop(MacroAssembler* masm,
Label* move_last_15,
Direction direction,
Alignment alignment) {
Register src = esi;
Register dst = edi;
Register count = ecx;
Register loop_count = edx;
Label loop, move_last_31, move_last_63;
__ cmp(loop_count, 0);
__ j(equal, &move_last_63);
__ bind(&loop);
// Main loop. Copy in 64 byte chunks.
if (direction == BACKWARD) __ sub(src, Immediate(0x40));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
__ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
__ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
__ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
if (direction == FORWARD) __ add(src, Immediate(0x40));
if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ movdqa(Operand(dst, 0x20), xmm2);
__ movdqa(Operand(dst, 0x30), xmm3);
if (direction == FORWARD) __ add(dst, Immediate(0x40));
__ dec(loop_count);
__ j(not_zero, &loop);
// At most 63 bytes left to copy.
__ bind(&move_last_63);
__ test(count, Immediate(0x20));
__ j(zero, &move_last_31);
if (direction == BACKWARD) __ sub(src, Immediate(0x20));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
__ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
if (direction == FORWARD) __ add(src, Immediate(0x20));
if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
if (direction == FORWARD) __ add(dst, Immediate(0x20));
// At most 31 bytes left to copy.
__ bind(&move_last_31);
__ test(count, Immediate(0x10));
__ j(zero, move_last_15);
if (direction == BACKWARD) __ sub(src, Immediate(0x10));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
if (direction == FORWARD) __ add(src, Immediate(0x10));
if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
if (direction == FORWARD) __ add(dst, Immediate(0x10));
}
void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
__ pop(esi);
__ pop(edi);
__ ret(0);
}
#undef __
#define __ masm.
class LabelConverter {
public:
explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
int32_t address(Label* l) const {
return reinterpret_cast<int32_t>(buffer_) + l->pos();
}
private:
byte* buffer_;
};
MemMoveFunction CreateMemMoveFunction() {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(page_allocator,
page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
LabelConverter conv(buffer);
// Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
// 32-bit C declaration function calls pass arguments on stack.
// Stack layout:
// esp[12]: Third argument, size.
// esp[8]: Second argument, source pointer.
// esp[4]: First argument, destination pointer.
// esp[0]: return address
const int kDestinationOffset = 1 * kPointerSize;
const int kSourceOffset = 2 * kPointerSize;
const int kSizeOffset = 3 * kPointerSize;
// When copying up to this many bytes, use special "small" handlers.
const size_t kSmallCopySize = 8;
// When copying up to this many bytes, use special "medium" handlers.
const size_t kMediumCopySize = 63;
// When non-overlapping region of src and dst is less than this,
// use a more careful implementation (slightly slower).
const size_t kMinMoveDistance = 16;
// Note that these values are dictated by the implementation below,
// do not just change them and hope things will work!
int stack_offset = 0; // Update if we change the stack height.
Label backward, backward_much_overlap;
Label forward_much_overlap, small_size, medium_size, pop_and_return;
__ push(edi);
__ push(esi);
stack_offset += 2 * kPointerSize;
Register dst = edi;
Register src = esi;
Register count = ecx;
Register loop_count = edx;
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
__ cmp(dst, src);
__ j(equal, &pop_and_return);
__ prefetch(Operand(src, 0), 1);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ cmp(count, kMediumCopySize);
__ j(below_equal, &medium_size);
__ cmp(dst, src);
__ j(above, &backward);
{
// |dst| is a lower address than |src|. Copy front-to-back.
Label unaligned_source, move_last_15, skip_last_move;
__ mov(eax, src);
__ sub(eax, dst);
__ cmp(eax, kMinMoveDistance);
__ j(below, &forward_much_overlap);
// Copy first 16 bytes.
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(Operand(dst, 0), xmm0);
// Determine distance to alignment: 16 - (dst & 0xF).
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
__ add(edx, Immediate(16));
__ add(dst, edx);
__ add(src, edx);
__ sub(count, edx);
// dst is now aligned. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
// Check if src is also aligned.
__ test(src, Immediate(0xF));
__ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
__ bind(&move_last_15);
__ and_(count, 0xF);
__ j(zero, &skip_last_move, Label::kNear);
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
__ bind(&skip_last_move);
MemMoveEmitPopAndReturn(&masm);
// Copy loop for unaligned source and aligned destination.
__ bind(&unaligned_source);
MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
__ jmp(&move_last_15);
// Less than kMinMoveDistance offset between dst and src.
Label loop_until_aligned, last_15_much_overlap;
__ bind(&loop_until_aligned);
__ mov_b(eax, Operand(src, 0));
__ inc(src);
__ mov_b(Operand(dst, 0), eax);
__ inc(dst);
__ dec(count);
__ bind(&forward_much_overlap); // Entry point into this block.
__ test(dst, Immediate(0xF));
__ j(not_zero, &loop_until_aligned);
// dst is now aligned, src can't be. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
FORWARD, MOVE_UNALIGNED);
__ bind(&last_15_much_overlap);
__ and_(count, 0xF);
__ j(zero, &pop_and_return);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ jmp(&medium_size);
}
{
// |dst| is a higher address than |src|. Copy backwards.
Label unaligned_source, move_first_15, skip_last_move;
__ bind(&backward);
// |dst| and |src| always point to the end of what's left to copy.
__ add(dst, count);
__ add(src, count);
__ mov(eax, dst);
__ sub(eax, src);
__ cmp(eax, kMinMoveDistance);
__ j(below, &backward_much_overlap);
// Copy last 16 bytes.
__ movdqu(xmm0, Operand(src, -0x10));
__ movdqu(Operand(dst, -0x10), xmm0);
// Find distance to alignment: dst & 0xF
__ mov(edx, dst);
__ and_(edx, 0xF);
__ sub(dst, edx);
__ sub(src, edx);
__ sub(count, edx);
// dst is now aligned. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
// Check if src is also aligned.
__ test(src, Immediate(0xF));
__ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
// At most 15 bytes to copy. Copy 16 bytes at beginning of string.
__ bind(&move_first_15);
__ and_(count, 0xF);
__ j(zero, &skip_last_move, Label::kNear);
__ sub(src, count);
__ sub(dst, count);
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(Operand(dst, 0), xmm0);
__ bind(&skip_last_move);
MemMoveEmitPopAndReturn(&masm);
// Copy loop for unaligned source and aligned destination.
__ bind(&unaligned_source);
MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
__ jmp(&move_first_15);
// Less than kMinMoveDistance offset between dst and src.
Label loop_until_aligned, first_15_much_overlap;
__ bind(&loop_until_aligned);
__ dec(src);
__ dec(dst);
__ mov_b(eax, Operand(src, 0));
__ mov_b(Operand(dst, 0), eax);
__ dec(count);
__ bind(&backward_much_overlap); // Entry point into this block.
__ test(dst, Immediate(0xF));
__ j(not_zero, &loop_until_aligned);
// dst is now aligned, src can't be. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
BACKWARD, MOVE_UNALIGNED);
__ bind(&first_15_much_overlap);
__ and_(count, 0xF);
__ j(zero, &pop_and_return);
// Small/medium handlers expect dst/src to point to the beginning.
__ sub(dst, count);
__ sub(src, count);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ jmp(&medium_size);
}
{
// Special handlers for 9 <= copy_size < 64. No assumptions about
// alignment or move distance, so all reads must be unaligned and
// must happen before any writes.
Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
__ movsd(xmm0, Operand(src, 0));
__ movsd(xmm1, Operand(src, count, times_1, -8));
__ movsd(Operand(dst, 0), xmm0);
__ movsd(Operand(dst, count, times_1, -8), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f17_32);
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(xmm1, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f33_48);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ movdqu(xmm2, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, 0x10), xmm1);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f49_63);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ movdqu(xmm2, Operand(src, 0x20));
__ movdqu(xmm3, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, 0x10), xmm1);
__ movdqu(Operand(dst, 0x20), xmm2);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
MemMoveEmitPopAndReturn(&masm);
__ bind(&medium_handlers);
__ dd(conv.address(&f9_16));
__ dd(conv.address(&f17_32));
__ dd(conv.address(&f33_48));
__ dd(conv.address(&f49_63));
__ bind(&medium_size); // Entry point into this block.
__ mov(eax, count);
__ dec(eax);
__ shr(eax, 4);
if (FLAG_debug_code) {
Label ok;
__ cmp(eax, 3);
__ j(below_equal, &ok);
__ int3();
__ bind(&ok);
}
__ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
__ jmp(eax);
}
{
// Specialized copiers for copy_size <= 8 bytes.
Label small_handlers, f0, f1, f2, f3, f4, f5_8;
__ bind(&f0);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f1);
__ mov_b(eax, Operand(src, 0));
__ mov_b(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f2);
__ mov_w(eax, Operand(src, 0));
__ mov_w(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f3);
__ mov_w(eax, Operand(src, 0));
__ mov_b(edx, Operand(src, 2));
__ mov_w(Operand(dst, 0), eax);
__ mov_b(Operand(dst, 2), edx);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f4);
__ mov(eax, Operand(src, 0));
__ mov(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f5_8);
__ mov(eax, Operand(src, 0));
__ mov(edx, Operand(src, count, times_1, -4));
__ mov(Operand(dst, 0), eax);
__ mov(Operand(dst, count, times_1, -4), edx);
MemMoveEmitPopAndReturn(&masm);
__ bind(&small_handlers);
__ dd(conv.address(&f0));
__ dd(conv.address(&f1));
__ dd(conv.address(&f2));
__ dd(conv.address(&f3));
__ dd(conv.address(&f4));
__ dd(conv.address(&f5_8));
__ dd(conv.address(&f5_8));
__ dd(conv.address(&f5_8));
__ dd(conv.address(&f5_8));
__ bind(&small_size); // Entry point into this block.
if (FLAG_debug_code) {
Label ok;
__ cmp(count, 8);
__ j(below_equal, &ok);
__ int3();
__ bind(&ok);
}
__ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
__ jmp(eax);
}
__ bind(&pop_and_return);
MemMoveEmitPopAndReturn(&masm);
CodeDesc desc;
masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
return FUNCTION_CAST<MemMoveFunction>(buffer);
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_IA32
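
One detail of the code above that is easy to miss: the medium-size path (9 to 63 bytes) emits four handler addresses with dd, computes the table index as (size - 1) >> 4, and jumps through the table; each handler performs all of its loads, including an overlapping load taken from the end of the region, before any store, so overlapping dst/src is still handled correctly. A rough C++ equivalent under those same rules, with illustrative names and function pointers standing in for the emitted jump table:

#include <cstddef>
#include <cstdint>
#include <cstring>

namespace {

using Handler = void (*)(uint8_t* dst, const uint8_t* src, size_t size);

// 9..16 bytes: two 8-byte loads (the second taken from the end of the region,
// so it may overlap the first), then two 8-byte stores. All reads happen
// before any write, so overlapping dst/src is still moved correctly.
void Move9To16(uint8_t* dst, const uint8_t* src, size_t size) {
  uint64_t head, tail;
  std::memcpy(&head, src, 8);
  std::memcpy(&tail, src + size - 8, 8);
  std::memcpy(dst, &head, 8);
  std::memcpy(dst + size - 8, &tail, 8);
}

// Stand-in for the 17..32, 33..48 and 49..63 byte handlers, which apply the
// same idea with one, two or three 16-byte loads plus a final overlapping
// 16-byte load from the end of the region.
void MoveUpTo63(uint8_t* dst, const uint8_t* src, size_t size) {
  uint8_t tmp[63];
  std::memcpy(tmp, src, size);  // read everything first
  std::memcpy(dst, tmp, size);  // then write
}

const Handler kMediumHandlers[4] = {Move9To16, MoveUpTo63, MoveUpTo63,
                                    MoveUpTo63};

}  // namespace

// Equivalent of the medium_size entry point: valid for 9 <= size <= 63.
void MediumMove(uint8_t* dst, const uint8_t* src, size_t size) {
  // (size - 1) / 16 maps 9..16 -> 0, 17..32 -> 1, 33..48 -> 2, 49..63 -> 3.
  kMediumHandlers[(size - 1) >> 4](dst, src, size);
}
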
@@ -7,7 +7,26 @@
namespace v8 {
namespace internal {
#if V8_OS_POSIX && V8_HOST_ARCH_ARM
#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
}
// Initialize to library version so we can call this at any time during startup.
static MemMoveFunction memmove_function = &MemMoveWrapper;
// Defined in codegen-ia32.cc.
MemMoveFunction CreateMemMoveFunction();
// Copy memory area to disjoint memory area.
V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size) {
if (size == 0) return;
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
(*memmove_function)(dest, src, size);
}
#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
size_t chars) {
uint16_t* limit = dest + chars;
@@ -37,7 +56,12 @@ static bool g_memcopy_functions_initialized = false;
void init_memcopy_functions() {
if (g_memcopy_functions_initialized) return;
g_memcopy_functions_initialized = true;
#if V8_OS_POSIX && V8_HOST_ARCH_ARM
#if V8_TARGET_ARCH_IA32
MemMoveFunction generated_memmove = CreateMemMoveFunction();
if (generated_memmove != nullptr) {
memmove_function = generated_memmove;
}
#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
memcopy_uint16_uint8_function =
CreateMemCopyUint16Uint8Function(&MemCopyUint16Uint8Wrapper);
......
@@ -23,7 +23,21 @@ typedef uintptr_t Address;
// Initializes the codegen support that depends on CPU features.
void init_memcopy_functions();
#if defined(V8_HOST_ARCH_ARM)
#if defined(V8_TARGET_ARCH_IA32)
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
const int kMinComplexMemCopy = 64;
// Copy memory area. No restrictions.
V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size);
typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
// Keep the distinction of "move" vs. "copy" for the benefit of other
// architectures.
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
MemMove(dest, src, size);
}
#elif defined(V8_HOST_ARCH_ARM)
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
size_t size);
V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
@@ -74,15 +88,13 @@ V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
#else
// Copy memory area to disjoint memory area.
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
std::memcpy(dest, src, size);
memcpy(dest, src, size);
}
V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
size_t size) {
std::memmove(dest, src, size);
memmove(dest, src, size);
}
static constexpr int kMinComplexMemCopy = 8;
const int kMinComplexMemCopy = 8;
#endif // V8_TARGET_ARCH_IA32
// Copies words from |src| to |dst|. The data spans must not overlap.
......
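
Finally, the kMinComplexMemCopy constant restored in the header is a hint for callers: below that size, the extra overhead of calling MemCopy is likely to outweigh its benefit, as its comment says. A hypothetical caller-side use (the helper name and stand-in definitions below are illustrative, not part of this diff):

#include <cstddef>
#include <cstring>

// Stand-ins for the declarations in the header hunk above.
const int kMinComplexMemCopy = 64;  // ia32 value from the hunk
inline void MemCopy(void* dest, const void* src, size_t size) {
  std::memcpy(dest, src, size);  // the real version dispatches to MemMove
}

// Hypothetical caller: short copies use a plain loop, longer ones pay for the
// MemCopy call and let the fast routine do the work.
void CopyCharsExample(char* dst, const char* src, size_t n) {
  if (n < static_cast<size_t>(kMinComplexMemCopy)) {
    for (size_t i = 0; i < n; ++i) dst[i] = src[i];
  } else {
    MemCopy(dst, src, n);
  }
}
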