From 8b9a5bccb9e151399000b4af112cbb90c93458ab Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet@google.com>
Date: Wed, 16 Jul 2025 10:28:05 +0200
Subject: [PATCH] Revert "[libc][NFC] refactor Cortex `memcpy` code (#148204)"

This reverts commit 7c69c3b0baa5a99680aa8c6972a6b3ea41393943.
---
 libc/src/string/memory_utils/CMakeLists.txt  |   1 -
 libc/src/string/memory_utils/arm/common.h    |  52 -----
 .../string/memory_utils/arm/inline_memcpy.h  | 195 +++++++++---------
 .../llvm-project-overlay/libc/BUILD.bazel    |   1 -
 4 files changed, 98 insertions(+), 151 deletions(-)
 delete mode 100644 libc/src/string/memory_utils/arm/common.h

diff --git a/libc/src/string/memory_utils/CMakeLists.txt b/libc/src/string/memory_utils/CMakeLists.txt
index 633d9f12949d2..a967247db53f4 100644
--- a/libc/src/string/memory_utils/CMakeLists.txt
+++ b/libc/src/string/memory_utils/CMakeLists.txt
@@ -7,7 +7,6 @@ add_header_library(
     aarch64/inline_memcpy.h
     aarch64/inline_memmove.h
    aarch64/inline_memset.h
-    arm/common.h
     arm/inline_memcpy.h
     generic/aligned_access.h
     generic/byte_per_byte.h
diff --git a/libc/src/string/memory_utils/arm/common.h b/libc/src/string/memory_utils/arm/common.h
deleted file mode 100644
index 155bc3481709e..0000000000000
--- a/libc/src/string/memory_utils/arm/common.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//===-- Common constants and defines for arm --------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_COMMON_H
-#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_COMMON_H
-
-#include "src/__support/macros/attributes.h" // LIBC_INLINE_VAR
-#include "src/string/memory_utils/utils.h"   // CPtr, Ptr, distance_to_align
-
-#include <stddef.h> // size_t
-
-// https://libc.llvm.org/compiler_support.html
-// Support for [[likely]] / [[unlikely]]
-//  [X] GCC 12.2
-//  [X] Clang 12
-//  [ ] Clang 11
-#define LIBC_ATTR_LIKELY [[likely]]
-#define LIBC_ATTR_UNLIKELY [[unlikely]]
-
-#if defined(LIBC_COMPILER_IS_CLANG)
-#if LIBC_COMPILER_CLANG_VER < 1200
-#undef LIBC_ATTR_LIKELY
-#undef LIBC_ATTR_UNLIKELY
-#define LIBC_ATTR_LIKELY
-#define LIBC_ATTR_UNLIKELY
-#endif
-#endif
-
-namespace LIBC_NAMESPACE_DECL {
-
-LIBC_INLINE_VAR constexpr size_t kWordSize = sizeof(uint32_t);
-
-enum class AssumeAccess { kUnknown, kAligned };
-enum class BlockOp { kFull, kByWord };
-
-LIBC_INLINE auto misaligned(CPtr ptr) {
-  return distance_to_align_down<kWordSize>(ptr);
-}
-
-LIBC_INLINE CPtr bitwise_or(CPtr a, CPtr b) {
-  return cpp::bit_cast<CPtr>(cpp::bit_cast<uintptr_t>(a) |
-                             cpp::bit_cast<uintptr_t>(b));
-}
-
-} // namespace LIBC_NAMESPACE_DECL
-
-#endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_COMMON_H
diff --git a/libc/src/string/memory_utils/arm/inline_memcpy.h b/libc/src/string/memory_utils/arm/inline_memcpy.h
index 30b99d41e0967..61efebe29b485 100644
--- a/libc/src/string/memory_utils/arm/inline_memcpy.h
+++ b/libc/src/string/memory_utils/arm/inline_memcpy.h
@@ -5,56 +5,63 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-// The functions defined in this file give approximate code size. These sizes
-// assume the following configuration options:
-// - LIBC_CONF_KEEP_FRAME_POINTER = false
-// - LIBC_CONF_ENABLE_STRONG_STACK_PROTECTOR = false
-// - LIBC_ADD_NULL_CHECKS = false
 #ifndef LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
 #define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
 
 #include "src/__support/macros/attributes.h"   // LIBC_INLINE
 #include "src/__support/macros/optimization.h" // LIBC_LOOP_NOUNROLL
-#include "src/string/memory_utils/arm/common.h" // LIBC_ATTR_LIKELY, LIBC_ATTR_UNLIKELY
 #include "src/string/memory_utils/utils.h" // memcpy_inline, distance_to_align
 
 #include <stddef.h> // size_t
 
+// https://libc.llvm.org/compiler_support.html
+// Support for [[likely]] / [[unlikely]]
+//  [X] GCC 12.2
+//  [X] Clang 12
+//  [ ] Clang 11
+#define LIBC_ATTR_LIKELY [[likely]]
+#define LIBC_ATTR_UNLIKELY [[unlikely]]
+
+#if defined(LIBC_COMPILER_IS_CLANG)
+#if LIBC_COMPILER_CLANG_VER < 1200
+#undef LIBC_ATTR_LIKELY
+#undef LIBC_ATTR_UNLIKELY
+#define LIBC_ATTR_LIKELY
+#define LIBC_ATTR_UNLIKELY
+#endif
+#endif
+
 namespace LIBC_NAMESPACE_DECL {
 
 namespace {
 
-// Performs a copy of `bytes` bytes from `src` to `dst`. This function has the
-// semantics of `memcpy` where `src` and `dst` are `__restrict`. The compiler is
-// free to use whatever instruction is best for the size and assumed access.
-template <size_t bytes, AssumeAccess access>
-LIBC_INLINE void copy(void *dst, const void *src) {
-  if constexpr (access == AssumeAccess::kAligned) {
-    constexpr size_t alignment = bytes > kWordSize ? kWordSize : bytes;
-    memcpy_inline<bytes>(assume_aligned<alignment>(dst),
-                         assume_aligned<alignment>(src));
-  } else if constexpr (access == AssumeAccess::kUnknown) {
-    memcpy_inline<bytes>(dst, src);
-  } else {
-    static_assert(false);
-  }
-}
+LIBC_INLINE_VAR constexpr size_t kWordSize = sizeof(uint32_t);
 
-template <size_t bytes, BlockOp block_op = BlockOp::kFull,
-          AssumeAccess access = AssumeAccess::kUnknown>
-LIBC_INLINE void copy_block_and_bump_pointers(Ptr &dst, CPtr &src) {
-  if constexpr (block_op == BlockOp::kFull) {
-    copy<bytes, access>(dst, src);
-  } else if constexpr (block_op == BlockOp::kByWord) {
+enum Strategy {
+  ForceWordLdStChain,
+  AssumeWordAligned,
+  AssumeUnaligned,
+};
+
+template <size_t bytes, Strategy strategy = AssumeUnaligned>
+LIBC_INLINE void copy_and_bump_pointers(Ptr &dst, CPtr &src) {
+  if constexpr (strategy == AssumeUnaligned) {
+    memcpy_inline<bytes>(assume_aligned<1>(dst), assume_aligned<1>(src));
+  } else if constexpr (strategy == AssumeWordAligned) {
+    static_assert(bytes >= kWordSize);
+    memcpy_inline<bytes>(assume_aligned<kWordSize>(dst),
+                         assume_aligned<kWordSize>(src));
+  } else if constexpr (strategy == ForceWordLdStChain) {
     // We restrict loads/stores to 4 bytes to prevent the use of load/store
-    // multiple (LDM, STM) and load/store double (LDRD, STRD).
+    // multiple (LDM, STM) and load/store double (LDRD, STRD). First, they may
+    // fault (see notes below) and second, they use more registers which in turn
+    // adds push/pop instructions in the hot path.
     static_assert((bytes % kWordSize == 0) && (bytes >= kWordSize));
     LIBC_LOOP_UNROLL
-    for (size_t offset = 0; offset < bytes; offset += kWordSize) {
-      copy<kWordSize, access>(dst + offset, src + offset);
+    for (size_t i = 0; i < bytes / kWordSize; ++i) {
+      const size_t offset = i * kWordSize;
+      memcpy_inline<kWordSize>(dst + offset, src + offset);
     }
-  } else {
-    static_assert(false, "Invalid BlockOp");
   }
   // In the 1, 2, 4 byte copy case, the compiler can fold pointer offsetting
   // into the load/store instructions.
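The `ForceWordLdStChain` strategy restored by the `+` lines above can be approximated outside the libc tree. Below is a minimal sketch, assuming Clang targeting ARM; `copy_word_chain`, `copy64`, and `kWord` are illustrative names, and `__builtin_memcpy_inline` / `#pragma unroll` stand in for the libc-internal `memcpy_inline` and `LIBC_LOOP_UNROLL` helpers:

#include <stddef.h>
#include <stdint.h>

constexpr size_t kWord = sizeof(uint32_t); // 4-byte ARM word

// Copies `bytes` bytes as a chain of single-word copies. Because each element
// copy is exactly 4 bytes, the compiler emits one LDR/STR pair per word and
// cannot fuse neighboring accesses into LDM/STM or LDRD/STRD, which may fault
// on unaligned addresses and consume extra registers.
template <size_t bytes>
void copy_word_chain(char *dst, const char *src) {
  static_assert(bytes % kWord == 0 && bytes >= kWord, "whole words only");
#pragma unroll
  for (size_t offset = 0; offset < bytes; offset += kWord)
    __builtin_memcpy_inline(dst + offset, src + offset, kWord);
}

// Usage mirroring the 64-byte block call sites further down: copy one block.
void copy64(char *dst, const char *src) { copy_word_chain<64>(dst, src); }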
@@ -65,27 +72,39 @@ LIBC_INLINE void copy_block_and_bump_pointers(Ptr &dst, CPtr &src) {
   src += bytes;
 }
 
-template <size_t bytes, BlockOp block_op, AssumeAccess access>
-LIBC_INLINE void consume_by_block(Ptr &dst, CPtr &src, size_t &size) {
+LIBC_INLINE void copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src,
+                                              const size_t size) {
   LIBC_LOOP_NOUNROLL
-  for (size_t i = 0; i < size / bytes; ++i)
-    copy_block_and_bump_pointers<bytes, block_op, access>(dst, src);
-  size %= bytes;
+  for (size_t i = 0; i < size; ++i)
+    *dst++ = *src++;
 }
 
-[[maybe_unused]] LIBC_INLINE void
-copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src, size_t size) {
+template <size_t block_size, Strategy strategy>
+LIBC_INLINE void copy_blocks_and_update_args(Ptr &dst, CPtr &src,
+                                             size_t &size) {
   LIBC_LOOP_NOUNROLL
-  for (size_t i = 0; i < size; ++i)
-    *dst++ = *src++;
+  for (size_t i = 0; i < size / block_size; ++i)
+    copy_and_bump_pointers<block_size, strategy>(dst, src);
+  // Update `size` once at the end instead of once per iteration.
+  size %= block_size;
+}
+
+LIBC_INLINE CPtr bitwise_or(CPtr a, CPtr b) {
+  return cpp::bit_cast<CPtr>(cpp::bit_cast<uintptr_t>(a) |
+                             cpp::bit_cast<uintptr_t>(b));
+}
+
+LIBC_INLINE auto misaligned(CPtr a) {
+  return distance_to_align_down<kWordSize>(a);
 }
 
 } // namespace
 
-// Implementation for Cortex-M0, M0+, M1 cores that do not allow for unaligned
-// loads/stores. It compiles down to 208 bytes when used through `memcpy` that
-// also needs to return the `dst` ptr.
-// Note:
+// Implementation for Cortex-M0, M0+, M1.
+// Notes:
+// - It compiles down to 196 bytes, but 220 bytes when used through `memcpy`
+//   that also needs to return the `dst` ptr.
+// - These cores do not allow for unaligned loads/stores.
 // - When `src` and `dst` are coaligned, we start by aligning them and perform
 //   bulk copies. We let the compiler know the pointers are aligned so it can
 //   use load/store multiple (LDM, STM). This significantly increases throughput
@@ -106,18 +125,9 @@ copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src, size_t size) {
     if (src_alignment == 0)
       LIBC_ATTR_LIKELY {
         // Both `src` and `dst` are now word-aligned.
-        // We first copy by blocks of 64 bytes, the compiler will use 4
-        // load/store multiple (LDM, STM), each of 4 words. This requires more
-        // registers so additional push/pop are needed but the speedup is worth
-        // it.
-        consume_by_block<64, BlockOp::kFull, AssumeAccess::kAligned>(dst, src,
-                                                                     size);
-        // Then we use blocks of 4 word load/store.
-        consume_by_block<16, BlockOp::kByWord, AssumeAccess::kAligned>(dst, src,
-                                                                       size);
-        // Then we use word by word copy.
-        consume_by_block<4, BlockOp::kByWord, AssumeAccess::kAligned>(dst, src,
-                                                                      size);
+        copy_blocks_and_update_args<64, AssumeWordAligned>(dst, src, size);
+        copy_blocks_and_update_args<16, AssumeWordAligned>(dst, src, size);
+        copy_blocks_and_update_args<4, AssumeWordAligned>(dst, src, size);
       }
     else {
       // `dst` is aligned but `src` is not.
@@ -128,7 +138,7 @@ copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src, size_t size) {
             src_alignment == 2
                 ? load_aligned<uint32_t, uint16_t, uint16_t>(src)
                 : load_aligned<uint32_t, uint8_t, uint16_t, uint8_t>(src);
-        copy<kWordSize, AssumeAccess::kAligned>(dst, &value);
+        memcpy_inline<kWordSize>(assume_aligned<kWordSize>(dst), &value);
         dst += kWordSize;
         src += kWordSize;
         size -= kWordSize;
@@ -141,8 +151,17 @@ copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src, size_t size) {
 }
 
 // Implementation for Cortex-M3, M4, M7, M23, M33, M35P, M52 with hardware
-// support for unaligned loads and stores. It compiles down to 272 bytes when
-// used through `memcpy` that also needs to return the `dst` ptr.
+// support for unaligned loads and stores.
+// Notes:
+// - It compiles down to 266 bytes.
+// - `dst` and `src` are not `__restrict` to prevent the compiler from
+//   reordering loads/stores.
+// - We keep state variables to a strict minimum to keep everything in the free
+//   registers and prevent costly push / pop.
+// - If unaligned single loads/stores to normal memory are supported, unaligned
+//   accesses for load/store multiple (LDM, STM) and load/store double (LDRD,
+//   STRD) instructions are generally not supported and will still fault so we
+//   make sure to restrict unrolling to word loads/stores.
 [[maybe_unused]] LIBC_INLINE void inline_memcpy_arm_mid_end(Ptr dst, CPtr src,
                                                             size_t size) {
   if (misaligned(bitwise_or(src, dst)))
@@ -150,60 +169,38 @@ copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src, size_t size) {
     LIBC_ATTR_UNLIKELY {
       if (size < 8)
         LIBC_ATTR_UNLIKELY {
           if (size & 1)
-            copy_block_and_bump_pointers<1>(dst, src);
+            copy_and_bump_pointers<1>(dst, src);
           if (size & 2)
-            copy_block_and_bump_pointers<2>(dst, src);
+            copy_and_bump_pointers<2>(dst, src);
           if (size & 4)
-            copy_block_and_bump_pointers<4>(dst, src);
+            copy_and_bump_pointers<4>(dst, src);
           return;
         }
       if (misaligned(src))
         LIBC_ATTR_UNLIKELY {
           const size_t offset = distance_to_align_up<kWordSize>(dst);
           if (offset & 1)
-            copy_block_and_bump_pointers<1>(dst, src);
+            copy_and_bump_pointers<1>(dst, src);
           if (offset & 2)
-            copy_block_and_bump_pointers<2>(dst, src);
+            copy_and_bump_pointers<2>(dst, src);
           size -= offset;
         }
     }
-  // `dst` and `src` are not necessarily both aligned at that point but this
-  // implementation assumes hardware support for unaligned loads and stores so
-  // it is still fast to perform unrolled word by word copy. Note that wider
-  // accesses through the use of load/store multiple (LDM, STM) and load/store
-  // double (LDRD, STRD) instructions are generally not supported and can fault.
-  // By forcing decomposition of 64 bytes copy into word by word copy, the
-  // compiler can use the first load to prefetch memory:
-  //   ldr r3, [r1, #64]! <- prefetch next cache line
-  //   str r3, [r0]
-  //   ldr r3, [r1, #0x4]
-  //   str r3, [r0, #0x4]
-  //   ...
-  //   ldr r3, [r1, #0x3c]
-  //   str r3, [r0, #0x3c]
-  // This is a bit detrimental for sizes between 64 and 256 (less than 10%
-  // penalty) but the prefetch yields better throughput for larger copies.
-  consume_by_block<64, BlockOp::kByWord, AssumeAccess::kUnknown>(dst, src,
-                                                                 size);
-  consume_by_block<16, BlockOp::kByWord, AssumeAccess::kUnknown>(dst, src,
-                                                                 size);
-  consume_by_block<4, BlockOp::kByWord, AssumeAccess::kUnknown>(dst, src, size);
+  copy_blocks_and_update_args<64, ForceWordLdStChain>(dst, src, size);
+  copy_blocks_and_update_args<16, ForceWordLdStChain>(dst, src, size);
+  copy_blocks_and_update_args<4, AssumeUnaligned>(dst, src, size);
   if (size & 1)
-    copy_block_and_bump_pointers<1>(dst, src);
+    copy_and_bump_pointers<1>(dst, src);
   if (size & 2)
-    copy_block_and_bump_pointers<2>(dst, src);
+    LIBC_ATTR_UNLIKELY
+  copy_and_bump_pointers<2>(dst, src);
 }
 
-[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm(Ptr dst, CPtr src,
+[[maybe_unused]] LIBC_INLINE void inline_memcpy_arm(void *__restrict dst_,
+                                                    const void *__restrict src_,
                                                     size_t size) {
-  // The compiler performs alias analysis and is able to prove that `dst` and
-  // `src` do not alias by propagating the `__restrict` keyword from the
-  // `memcpy` prototype. This allows the compiler to merge consecutive
-  // load/store (LDR, STR) instructions generated in
-  // `copy_block_and_bump_pointers` with `BlockOp::kByWord` into load/store
-  // double (LDRD, STRD) instructions, this is undesirable so we prevent the
-  // compiler from inferring `__restrict` with the following line.
-  asm volatile("" : "+r"(dst), "+r"(src));
+  Ptr dst = cpp::bit_cast<Ptr>(dst_);
+  CPtr src = cpp::bit_cast<CPtr>(src_);
 #ifdef __ARM_FEATURE_UNALIGNED
   return inline_memcpy_arm_mid_end(dst, src, size);
 #else
@@ -213,4 +210,8 @@ copy_bytes_and_bump_pointers(Ptr &dst, CPtr &src, size_t size) {
 
 } // namespace LIBC_NAMESPACE_DECL
 
+// Cleanup local macros
+#undef LIBC_ATTR_LIKELY
+#undef LIBC_ATTR_UNLIKELY
+
 #endif // LLVM_LIBC_SRC_STRING_MEMORY_UTILS_ARM_INLINE_MEMCPY_H
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index 9e288f7fec0a8..fe843d3207ceb 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -4448,7 +4448,6 @@ libc_support_library(
         "src/string/memory_utils/aarch64/inline_memcpy.h",
         "src/string/memory_utils/aarch64/inline_memmove.h",
         "src/string/memory_utils/aarch64/inline_memset.h",
-        "src/string/memory_utils/arm/common.h",
         "src/string/memory_utils/arm/inline_memcpy.h",
         "src/string/memory_utils/generic/aligned_access.h",
         "src/string/memory_utils/generic/byte_per_byte.h",
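The empty `asm volatile` statement deleted in the last `inline_memcpy.h` hunk is a pointer-laundering idiom worth spelling out. A minimal self-contained sketch of how it works, assuming GCC or Clang (the function and its names are illustrative, not libc code):

#include <stddef.h>

// `dst_` and `src_` are `__restrict`, so on ARM the compiler may normally
// prove non-aliasing and pair consecutive word accesses into LDRD/STRD.
void copy_laundered(void *__restrict dst_, const void *__restrict src_,
                    size_t size) {
  char *dst = static_cast<char *>(dst_);
  const char *src = static_cast<const char *>(src_);
  // Empty asm with "+r" (read-write) operands: the compiler must assume both
  // pointer values were rewritten inside the asm, so it can no longer trace
  // them back to the `__restrict` parameters and drops the no-alias
  // assumption from this point on.
  asm volatile("" : "+r"(dst), "+r"(src));
  for (size_t i = 0; i < size; ++i)
    dst[i] = src[i];
}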