Rename read32ne to aligned_read32ne, and similarly for the others.
Using the aligned methods requires more care to ensure that the address really is aligned, so it's nicer if the aligned methods are prefixed (see the sketch after the commit metadata below). The next commit will remove the unaligned_ prefix from the unaligned methods, which in liblzma are used in more places than the aligned ones.
This commit is contained in: parent 77bc5bc6dd, commit 5e78fcbf2e
3 changed files with 32 additions and 32 deletions
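To make the renamed contract concrete before the diff: a minimal caller sketch, assuming tuklib_integer.h is included; the function name is hypothetical and not part of the commit. Only addresses proven to be aligned may go through the aligned_ variants; anything else takes the unaligned_ path (which the next commit renames to plain readXXne).

#include <stdint.h>
#include "tuklib_integer.h"  // assumed include for the API being renamed

// Hypothetical illustration: pick the read variant based on what the
// caller can prove about the address.
static uint32_t
first_word(const uint8_t *buf)
{
	if (((uintptr_t)buf & 3) == 0)
		return aligned_read32ne(buf);   // address proven 4-byte aligned
	else
		return unaligned_read32ne(buf); // safe at any address
}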
src/common/tuklib_integer.h
@@ -9,16 +9,16 @@
 /// Native endian inline functions (XX = 16, 32, or 64):
 /// - Unaligned native endian reads: unaligned_readXXne(ptr)
 /// - Unaligned native endian writes: unaligned_writeXXne(ptr, num)
-/// - Aligned native endian reads: readXXne(ptr)
-/// - Aligned native endian writes: writeXXne(ptr, num)
+/// - Aligned native endian reads: aligned_readXXne(ptr)
+/// - Aligned native endian writes: aligned_writeXXne(ptr, num)
 ///
 /// Endianness-converting integer operations (these can be macros!)
 /// (XX = 16, 32, or 64; Y = b or l):
 /// - Byte swapping: bswapXX(num)
 /// - Byte order conversions to/from native (byteswaps if Y isn't
 ///   the native endianness): convXXYe(num)
-/// - Aligned reads: readXXYe(ptr)
-/// - Aligned writes: writeXXYe(ptr, num)
+/// - Aligned reads: aligned_readXXYe(ptr)
+/// - Aligned writes: aligned_writeXXYe(ptr, num)
 /// - Unaligned reads (16/32-bit only): unaligned_readXXYe(ptr)
 /// - Unaligned writes (16/32-bit only): unaligned_writeXXYe(ptr, num)
 ///
@@ -447,7 +447,7 @@ unaligned_write32le(uint8_t *buf, uint32_t num)
 
 
 static inline uint16_t
-read16ne(const uint8_t *buf)
+aligned_read16ne(const uint8_t *buf)
 {
 #if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
 		|| defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
@@ -461,7 +461,7 @@ read16ne(const uint8_t *buf)
 
 
 static inline uint32_t
-read32ne(const uint8_t *buf)
+aligned_read32ne(const uint8_t *buf)
 {
 #if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
 		|| defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
@@ -475,7 +475,7 @@ read32ne(const uint8_t *buf)
 
 
 static inline uint64_t
-read64ne(const uint8_t *buf)
+aligned_read64ne(const uint8_t *buf)
 {
 #if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
 		|| defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
@@ -489,7 +489,7 @@ read64ne(const uint8_t *buf)
 
 
 static inline void
-write16ne(uint8_t *buf, uint16_t num)
+aligned_write16ne(uint8_t *buf, uint16_t num)
 {
 #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
 	*(uint16_t *)buf = num;
@@ -501,7 +501,7 @@ write16ne(uint8_t *buf, uint16_t num)
 
 
 static inline void
-write32ne(uint8_t *buf, uint32_t num)
+aligned_write32ne(uint8_t *buf, uint32_t num)
 {
 #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
 	*(uint32_t *)buf = num;
@@ -513,7 +513,7 @@ write32ne(uint8_t *buf, uint32_t num)
 
 
 static inline void
-write64ne(uint8_t *buf, uint64_t num)
+aligned_write64ne(uint8_t *buf, uint64_t num)
 {
 #ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
 	*(uint64_t *)buf = num;
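The six hunks above rename only the signatures; each function body is truncated by the three-line diff context. For orientation, the full shape of one of these helpers is plausibly as follows. This is a hedged sketch reconstructed from the visible #if lines, not a verbatim quote of tuklib_integer.h.

#include <stdint.h>
#include <string.h>

static inline uint16_t
aligned_read16ne(const uint8_t *buf)
{
#if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
		|| defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
	// Fast path: buf is required to be 2-byte aligned, so the
	// pointer cast is assumed to be safe on this target.
	return *(const uint16_t *)buf;
#else
	// Portable path: memcpy sidesteps alignment and strict-aliasing
	// concerns, and optimizing compilers lower it to a single load.
	uint16_t num;
	memcpy(&num, buf, sizeof(num));
	return num;
#endif
}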
@@ -525,60 +525,60 @@ write64ne(uint8_t *buf, uint64_t num)
 
 
 static inline uint16_t
-read16be(const uint8_t *buf)
+aligned_read16be(const uint8_t *buf)
 {
-	uint16_t num = read16ne(buf);
+	uint16_t num = aligned_read16ne(buf);
 	return conv16be(num);
 }
 
 
 static inline uint16_t
-read16le(const uint8_t *buf)
+aligned_read16le(const uint8_t *buf)
 {
-	uint16_t num = read16ne(buf);
+	uint16_t num = aligned_read16ne(buf);
 	return conv16le(num);
 }
 
 
 static inline uint32_t
-read32be(const uint8_t *buf)
+aligned_read32be(const uint8_t *buf)
 {
-	uint32_t num = read32ne(buf);
+	uint32_t num = aligned_read32ne(buf);
 	return conv32be(num);
 }
 
 
 static inline uint32_t
-read32le(const uint8_t *buf)
+aligned_read32le(const uint8_t *buf)
 {
-	uint32_t num = read32ne(buf);
+	uint32_t num = aligned_read32ne(buf);
 	return conv32le(num);
 }
 
 
 static inline uint64_t
-read64be(const uint8_t *buf)
+aligned_read64be(const uint8_t *buf)
 {
-	uint64_t num = read64ne(buf);
+	uint64_t num = aligned_read64ne(buf);
 	return conv64be(num);
 }
 
 
 static inline uint64_t
-read64le(const uint8_t *buf)
+aligned_read64le(const uint8_t *buf)
 {
-	uint64_t num = read64ne(buf);
+	uint64_t num = aligned_read64ne(buf);
 	return conv64le(num);
 }
 
 
 // These need to be macros like in the unaligned case.
-#define write16be(buf, num) write16ne((buf), conv16be(num))
-#define write16le(buf, num) write16ne((buf), conv16le(num))
-#define write32be(buf, num) write32ne((buf), conv32be(num))
-#define write32le(buf, num) write32ne((buf), conv32le(num))
-#define write64be(buf, num) write64ne((buf), conv64be(num))
-#define write64le(buf, num) write64ne((buf), conv64le(num))
+#define aligned_write16be(buf, num) aligned_write16ne((buf), conv16be(num))
+#define aligned_write16le(buf, num) aligned_write16ne((buf), conv16le(num))
+#define aligned_write32be(buf, num) aligned_write32ne((buf), conv32be(num))
+#define aligned_write32le(buf, num) aligned_write32ne((buf), conv32le(num))
+#define aligned_write64be(buf, num) aligned_write64ne((buf), conv64be(num))
+#define aligned_write64le(buf, num) aligned_write64ne((buf), conv64le(num))
 
 
 ////////////////////
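The endianness-specific wrappers and write macros compose exactly as the diff shows: aligned_readXXYe() is aligned_readXXne() plus convXXYe(), and the write macros convert before the store. A small hypothetical usage sketch (the function name and record layout are invented for illustration):

#include <stdint.h>
#include "tuklib_integer.h"  // assumed include

// Hypothetical: a header stores a 32-bit little-endian counter at a
// 4-byte-aligned offset; rewrite it as big-endian in place.
static void
counter_to_be(uint8_t *hdr)
{
	const uint32_t n = aligned_read32le(hdr); // load + conv32le()
	aligned_write32be(hdr, n);                // conv32be() + store
}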
src/liblzma/check/crc32_fast.c
@@ -49,7 +49,7 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
 
 	// Calculate the CRC32 using the slice-by-eight algorithm.
 	while (buf < limit) {
-		crc ^= *(const uint32_t *)(buf);
+		crc ^= aligned_read32ne(buf);
 		buf += 4;
 
 		crc = lzma_crc32_table[7][A(crc)]
@@ -57,7 +57,7 @@ lzma_crc32(const uint8_t *buf, size_t size, uint32_t crc)
 				^ lzma_crc32_table[5][C(crc)]
 				^ lzma_crc32_table[4][D(crc)];
 
-		const uint32_t tmp = *(const uint32_t *)(buf);
+		const uint32_t tmp = aligned_read32ne(buf);
 		buf += 4;
 
 		// At least with some compilers, it is critical for
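The old expression `*(const uint32_t *)(buf)` and the new `aligned_read32ne(buf)` should compile to the same single load, but the cast reads uint8_t storage through a uint32_t lvalue, which violates C's aliasing rules unless unsafe type punning is deliberately enabled. A standalone contrast of the two idioms (illustrative function names, not from liblzma):

#include <stdint.h>
#include <string.h>

// Undefined behavior in portable C: accesses bytes through an
// unrelated uint32_t lvalue (and silently assumes alignment).
static uint32_t
load32_cast(const uint8_t *p)
{
	return *(const uint32_t *)p;
}

// Well-defined: memcpy may copy any object representation, and
// optimizing compilers emit a single 32-bit load for it.
static uint32_t
load32_memcpy(const uint8_t *p)
{
	uint32_t v;
	memcpy(&v, p, sizeof(v));
	return v;
}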
src/liblzma/check/crc64_fast.c
@@ -47,9 +47,9 @@ lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc)
 	while (buf < limit) {
 #ifdef WORDS_BIGENDIAN
 		const uint32_t tmp = (crc >> 32)
-				^ *(const uint32_t *)(buf);
+				^ aligned_read32ne(buf);
 #else
-		const uint32_t tmp = crc ^ *(const uint32_t *)(buf);
+		const uint32_t tmp = crc ^ aligned_read32ne(buf);
 #endif
 		buf += 4;
 
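Both CRC word loops may assert alignment because, earlier in these functions (outside the hunks shown), buf is advanced one byte at a time until it reaches a word boundary. A hedged sketch of that pre-loop fixup, with a hypothetical byte-update helper standing in for the real table lookup:

#include <stddef.h>
#include <stdint.h>

// Hypothetical stand-in for the table-driven per-byte CRC update.
extern uint64_t crc64_update_byte(uint64_t crc, uint8_t byte);

// Assumed shape of the pre-loop fixup, not verbatim liblzma code:
// consume leading bytes until buf is 4-byte aligned so that the main
// loop may call aligned_read32ne() on every iteration.
static const uint8_t *
align_input(const uint8_t *buf, size_t *size, uint64_t *crc)
{
	while (*size != 0 && ((uintptr_t)buf & 3) != 0) {
		*crc = crc64_update_byte(*crc, *buf++);
		--*size;
	}
	return buf;
}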