liblzma: Use 8-byte method in memcmplen.h on ARM64.

It requires fast unaligned access to 64-bit integers
and a fast instruction to count trailing zeros in
a 64-bit integer (__builtin_ctzll()). This perhaps
should be enabled on some other archs too.

Thanks to Chenxi Mao for the original patch:
https://github.com/tukaani-project/xz/pull/75 (the first commit)
According to the numbers there, this may improve encoding
speed by about 3-5 %.

This enables the 8-byte method on MSVC ARM64 too, which
should work but wasn't tested.
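
For readers unfamiliar with the trick, here is a minimal sketch of the
8-byte method on a little endian target with GNU C builtins. read64ne()
is a stand-in for liblzma's unaligned-read helper (assumed here to be a
memcpy-based load), and the loop is simplified: the real lzma_memcmplen()
may read up to LZMA_MEMCMPLEN_EXTRA bytes past the limit and clamps the
result.

#include <stdint.h>
#include <string.h>

// Stand-in for liblzma's helper: unaligned native-endian 64-bit load.
static inline uint64_t
read64ne(const uint8_t *buf)
{
	uint64_t v;
	memcpy(&v, buf, sizeof(v));	// Compiles to a single load where
	return v;			// unaligned access is fast.
}

// Length of the common prefix of buf1 and buf2, at most "limit".
// Simplified sketch: assumes the buffers may be safely read in
// 8-byte chunks (the real code bounds this with LZMA_MEMCMPLEN_EXTRA).
static uint32_t
memcmplen_sketch(const uint8_t *buf1, const uint8_t *buf2, uint32_t limit)
{
	uint32_t len = 0;
	while (len < limit) {
		const uint64_t x = read64ne(buf1 + len)
				- read64ne(buf2 + len);
		if (x != 0) {
			// Little endian: the bytes below the first
			// mismatch are equal, so they subtract to zero
			// and the lowest set bit of x falls inside the
			// first differing byte. Bit index / 8 gives the
			// byte offset of that mismatch.
			len += (uint32_t)__builtin_ctzll(x) >> 3;
			return len < limit ? len : limit;
		}
		len += 8;
	}
	return limit;
}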
Lasse Collin, 2023-12-20 21:15:16 +02:00
commit cd64dd70d5, parent 12c90c00f0
--- a/src/liblzma/common/memcmplen.h
+++ b/src/liblzma/common/memcmplen.h
@@ -24,7 +24,8 @@
 //	can use the intrinsics without the header file.
 #if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
 		&& defined(_MSC_VER) \
-		&& defined(_M_X64) \
+		&& (defined(_M_X64) \
+			|| defined(_M_ARM64) || defined(_M_ARM64EC)) \
 		&& !defined(__INTEL_COMPILER)
 #	include <intrin.h>
 #endif
@@ -58,20 +59,21 @@ lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
 #if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
 		&& (((TUKLIB_GNUC_REQ(3, 4) || defined(__clang__)) \
-			&& defined(__x86_64__)) \
+			&& (defined(__x86_64__) \
+				|| defined(__aarch64__))) \
 		|| (defined(__INTEL_COMPILER) && defined(__x86_64__)) \
 		|| (defined(__INTEL_COMPILER) && defined(_M_X64)) \
-		|| (defined(_MSC_VER) && defined(_M_X64)))
-	// I keep this x86-64 only for now since that's where I know this
-	// to be a good method. This may be fine on other 64-bit CPUs too.
-	// On big endian one should use xor instead of subtraction and switch
-	// to __builtin_clzll().
+		|| (defined(_MSC_VER) && (defined(_M_X64) \
+			|| defined(_M_ARM64) || defined(_M_ARM64EC))))
+	// This is only for x86-64 and ARM64 for now. This might be fine on
+	// other 64-bit processors too. On big endian one should use xor
+	// instead of subtraction and switch to __builtin_clzll().
 #	define LZMA_MEMCMPLEN_EXTRA 8
 	while (len < limit) {
 		const uint64_t x = read64ne(buf1 + len) - read64ne(buf2 + len);
 		if (x != 0) {
 			// MSVC or Intel C compiler on Windows
-#	if (defined(_MSC_VER) || defined(__INTEL_COMPILER)) && defined(_M_X64)
+#	if defined(_MSC_VER) || defined(__INTEL_COMPILER)
 			unsigned long tmp;
 			_BitScanForward64(&tmp, x);
 			len += (uint32_t)tmp >> 3;
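
The inner #if can drop its architecture test because the outer #if now
restricts the MSVC path to x64, ARM64, and ARM64EC anyway. For
illustration only (assumptions: little endian target, x is the nonzero
difference of two 64-bit loads; first_diff_byte() is a hypothetical
helper, not liblzma code), the two intrinsics compute the same thing:

#include <stdint.h>
#ifdef _MSC_VER
#	include <intrin.h>
#endif

// Byte offset of the first mismatch, given the nonzero difference x
// of two little endian 64-bit loads.
static inline uint32_t
first_diff_byte(uint64_t x)
{
#ifdef _MSC_VER
	unsigned long tmp;
	_BitScanForward64(&tmp, x);	// tmp = index of the lowest set bit
	return (uint32_t)tmp >> 3;	// bit index / 8 -> byte offset
#else
	return (uint32_t)__builtin_ctzll(x) >> 3;
#endif
}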