///////////////////////////////////////////////////////////////////////////////
//
/// \file       integer.h
/// \brief      Reading and writing integers from and to buffers
//
//  Author:     Lasse Collin
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////
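
// A hypothetical caller (not part of this header) would use these helpers
// roughly like this to read and write little endian fields regardless of
// host byte order:
//
//     uint8_t buf[4] = { 0x44, 0x33, 0x22, 0x11 };
//     uint32_t v = integer_read_32(buf);    // v == UINT32_C(0x11223344)
//     integer_write_32(buf, v + 1);         // buf[0] becomes 0x45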

#ifndef LZMA_INTEGER_H
#define LZMA_INTEGER_H

// On big endian, we need byte swapping. These macros may be used outside
// this file, so don't put these inside HAVE_FAST_UNALIGNED_ACCESS.
#ifdef WORDS_BIGENDIAN
#	include "bswap.h"
#	define integer_le_16(n) bswap_16(n)
#	define integer_le_32(n) bswap_32(n)
#	define integer_le_64(n) bswap_64(n)
#else
#	define integer_le_16(n) (n)
#	define integer_le_32(n) (n)
#	define integer_le_64(n) (n)
#endif
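
// For example, integer_le_16(0x1122) gives 0x2211 on a big endian system
// (via bswap_16) and 0x1122 unchanged on a little endian one, so a value
// stored through these macros is always little endian in memory.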

// I'm aware of AC_CHECK_ALIGNED_ACCESS_REQUIRED from Autoconf archive, but
// it's not useful here. We don't care if unaligned access is supported,
// we care if it is fast. Some systems can emulate unaligned access in
// software, which is horribly slow; we want to use byte-by-byte access on
// such systems but the Autoconf test would detect such a system as
// supporting unaligned access.
//
// NOTE: HAVE_FAST_UNALIGNED_ACCESS indicates only support for 16-bit and
// 32-bit integer loads and stores. 64-bit integers may or may not work.
// That's why 64-bit functions are commented out.
//
// TODO: Big endian PowerPC supports byte swapping load and store instructions
// that also allow unaligned access. Inline assembler could be OK for that.
//
// Performance of these functions isn't that important until LZMA3, but it
// doesn't hurt to have these ready already.
#ifdef HAVE_FAST_UNALIGNED_ACCESS

static inline uint16_t
integer_read_16(const uint8_t buf[static 2])
{
	uint16_t ret = *(const uint16_t *)(buf);
	return integer_le_16(ret);
}


static inline uint32_t
integer_read_32(const uint8_t buf[static 4])
{
	uint32_t ret = *(const uint32_t *)(buf);
	return integer_le_32(ret);
}


/*
static inline uint64_t
integer_read_64(const uint8_t buf[static 8])
{
	uint64_t ret = *(const uint64_t *)(buf);
	return integer_le_64(ret);
}
*/


static inline void
integer_write_16(uint8_t buf[static 2], uint16_t num)
{
	*(uint16_t *)(buf) = integer_le_16(num);
}


static inline void
integer_write_32(uint8_t buf[static 4], uint32_t num)
{
	*(uint32_t *)(buf) = integer_le_32(num);
}


/*
static inline void
integer_write_64(uint8_t buf[static 8], uint64_t num)
{
	*(uint64_t *)(buf) = integer_le_64(num);
}
*/


#else

static inline uint16_t
integer_read_16(const uint8_t buf[static 2])
{
	uint16_t ret = buf[0] | (buf[1] << 8);
	return ret;
}


static inline uint32_t
integer_read_32(const uint8_t buf[static 4])
{
	uint32_t ret = buf[0];
	ret |= (uint32_t)(buf[1]) << 8;
	ret |= (uint32_t)(buf[2]) << 16;
	ret |= (uint32_t)(buf[3]) << 24;
	return ret;
}


/*
static inline uint64_t
integer_read_64(const uint8_t buf[static 8])
{
	uint64_t ret = buf[0];
	ret |= (uint64_t)(buf[1]) << 8;
	ret |= (uint64_t)(buf[2]) << 16;
	ret |= (uint64_t)(buf[3]) << 24;
	ret |= (uint64_t)(buf[4]) << 32;
	ret |= (uint64_t)(buf[5]) << 40;
	ret |= (uint64_t)(buf[6]) << 48;
	ret |= (uint64_t)(buf[7]) << 56;
	return ret;
}
*/


static inline void
integer_write_16(uint8_t buf[static 2], uint16_t num)
{
	buf[0] = (uint8_t)(num);
	buf[1] = (uint8_t)(num >> 8);
}


static inline void
integer_write_32(uint8_t buf[static 4], uint32_t num)
{
	buf[0] = (uint8_t)(num);
	buf[1] = (uint8_t)(num >> 8);
	buf[2] = (uint8_t)(num >> 16);
	buf[3] = (uint8_t)(num >> 24);
}


/*
static inline void
integer_write_64(uint8_t buf[static 8], uint64_t num)
{
	buf[0] = (uint8_t)(num);
	buf[1] = (uint8_t)(num >> 8);
	buf[2] = (uint8_t)(num >> 16);
	buf[3] = (uint8_t)(num >> 24);
	buf[4] = (uint8_t)(num >> 32);
	buf[5] = (uint8_t)(num >> 40);
	buf[6] = (uint8_t)(num >> 48);
	buf[7] = (uint8_t)(num >> 56);
}
*/

#endif

#endif