Fix data corruption in LZ encoder with LZMA_SYNC_FLUSH.

Lasse Collin 2008-04-24 18:38:00 +03:00
parent bc04486e36
commit 712cfe3ebf
3 changed files with 38 additions and 5 deletions
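
The bug is triggered through the encoder's sync flush path. For context, here is a minimal sketch of how an application drives that path with the public liblzma API; the helper name, buffer sizes, and error handling are illustrative and not part of this commit, and the loop follows the documented rule that a flushing action must be repeated until lzma_code() returns LZMA_STREAM_END:

#include <stddef.h>
#include <stdint.h>
#include <lzma.h>

// Encode one chunk of input and sync-flush it. Assumes 'out' is large
// enough for everything the flush produces; a real program would drain
// next_out and keep looping instead.
static lzma_ret
encode_and_sync_flush(lzma_stream *strm, const uint8_t *in, size_t in_size,
		uint8_t *out, size_t out_size)
{
	strm->next_in = in;
	strm->avail_in = in_size;
	strm->next_out = out;
	strm->avail_out = out_size;

	// Keep calling lzma_code() with the same flushing action until it
	// returns LZMA_STREAM_END. Repeated sync flushes like this are
	// what exercise the match finder restart fixed by this commit.
	lzma_ret ret;
	do {
		ret = lzma_code(strm, LZMA_SYNC_FLUSH);
	} while (ret == LZMA_OK);

	return ret; // LZMA_STREAM_END on success
}

After LZMA_STREAM_END the caller switches back to LZMA_RUN for the next chunk; the new input arriving at that point is what lets fill_window() restart the match finder as shown in the first changed file below.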


@@ -188,6 +188,7 @@ lzma_lz_encoder_reset(lzma_lz_encoder *lz, lzma_allocator *allocator,
 	lz->read_pos = 0;
 	lz->read_limit = 0;
 	lz->write_pos = 0;
+	lz->pending = 0;
 
 	//////////////////
@@ -443,6 +444,21 @@ fill_window(lzma_coder *coder, lzma_allocator *allocator, const uint8_t *in,
 	if (ret == LZMA_OK && coder->lz.uncompressed_size == 0)
 		coder->lz.sequence = SEQ_FINISH;
 
+	// Restart the match finder after finished LZMA_SYNC_FLUSH.
+	if (coder->lz.pending > 0
+			&& coder->lz.read_pos < coder->lz.read_limit) {
+		// Match finder may update coder->pending and expects it to
+		// start from zero, so use a temporary variable.
+		const size_t pending = coder->lz.pending;
+		coder->lz.pending = 0;
+
+		// Rewind read_pos so that the match finder can hash
+		// the pending bytes.
+		assert(coder->lz.read_pos >= pending);
+		coder->lz.read_pos -= pending;
+		coder->lz.skip(&coder->lz, pending);
+	}
+
 	return ret;
 }
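
What the new code achieves, as a self-contained sketch (the toy_* names and struct are hypothetical; only the pending/read_pos bookkeeping mirrors the change above): during a sync flush the match finder cannot safely look ahead, so bytes are consumed without being hashed and only counted in pending; when encoding resumes, read_pos is rewound and the deferred bytes are replayed through skip().

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical, minimal match finder state for illustration only.
struct toy_mf {
	const uint8_t *buffer;
	size_t read_pos;    // next byte the encoder will process
	size_t read_limit;  // how far read_pos may currently advance
	size_t write_pos;   // end of the data present in the window
	size_t pending;     // bytes consumed during a flush, not hashed yet
};

// Stand-in for hashing buffer[read_pos] into the match finder.
static void
toy_hash_byte(struct toy_mf *mf)
{
	(void)mf;
}

// Counterpart of the skip() call: hash n bytes starting at read_pos.
static void
toy_skip(struct toy_mf *mf, size_t n)
{
	while (n-- > 0) {
		toy_hash_byte(mf);
		++mf->read_pos;
	}
}

// During LZMA_SYNC_FLUSH there is too little lookahead to hash safely,
// so the byte is consumed unhashed and counted (like move_pending()).
static void
toy_consume_during_flush(struct toy_mf *mf)
{
	++mf->read_pos;
	assert(mf->read_pos <= mf->write_pos);
	++mf->pending;
}

// Once new input arrives after the flush, rewind and replay the pending
// bytes so the hash covers them too (this mirrors fill_window() above).
static void
toy_restart_after_flush(struct toy_mf *mf)
{
	if (mf->pending > 0 && mf->read_pos < mf->read_limit) {
		const size_t pending = mf->pending;
		mf->pending = 0;

		assert(mf->read_pos >= pending);
		mf->read_pos -= pending;
		toy_skip(mf, pending);
	}
}

The key point is that every byte of the window still gets inserted into the match finder exactly once, only later than usual.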


@@ -84,6 +84,10 @@ struct lzma_lz_encoder_s {
 	/// to buffer[write_pos].
 	size_t write_pos;
 
+	/// Number of bytes not hashed before read_pos. This is needed to
+	/// restart the match finder after LZMA_SYNC_FLUSH.
+	size_t pending;
+
 	/// Number of bytes that must be kept available in our input history.
 	/// That is, once keep_size_before bytes have been processed,
 	/// buffer[read_pos - keep_size_before] is the oldest byte that
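
The new field adds a small invariant on top of the existing position fields. As a hypothetical debug helper (not part of the commit; assumes <assert.h> and the header declaring lzma_lz_encoder are available) it could be checked like this:

// The pending bytes are buffer[read_pos - pending .. read_pos - 1]:
// already inside the window, but not yet hashed by the match finder.
static void
check_pending_invariant(const lzma_lz_encoder *lz)
{
	assert(lz->pending <= lz->read_pos);
	assert(lz->read_pos <= lz->write_pos);
}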


@@ -104,6 +104,14 @@ do { \
 } while (0)
 
+
+#define move_pending() \
+do { \
+	++lz->read_pos; \
+	assert(lz->read_pos <= lz->write_pos); \
+	++lz->pending; \
+} while (0)
+
 
 //////////////////////
 // Global constants //
 //////////////////////
@@ -123,13 +131,15 @@ LZMA_GET_MATCHES(LZMA_MATCH_FINDER_NAME_LOWER)
 		len_limit = lz->match_max_len;
 	} else {
 		len_limit = lz->write_pos - lz->read_pos;
-		if (len_limit < MIN_MATCH_CHECK) {
+		if (len_limit < MIN_MATCH_CHECK || lz->sequence == SEQ_FLUSH) {
 			distances[0] = 0;
-			move_pos();
+			move_pending();
 			return;
 		}
 	}
 
+	assert(lz->pending == 0);
+
 	int32_t offset = 1;
 	const uint32_t match_min_pos
 			= lz->read_pos + lz->offset > lz->cyclic_buffer_size
@@ -291,7 +301,7 @@ LZMA_SKIP(LZMA_MATCH_FINDER_NAME_LOWER)
 	do {
 #ifdef IS_HASH_CHAIN
 		if (lz->write_pos - lz->read_pos < NUM_HASH_BYTES) {
-			move_pos();
+			move_pending();
 			continue;
 		}
 #else
@@ -300,8 +310,9 @@ LZMA_SKIP(LZMA_MATCH_FINDER_NAME_LOWER)
 			len_limit = lz->match_max_len;
 		} else {
 			len_limit = lz->write_pos - lz->read_pos;
-			if (len_limit < MIN_MATCH_CHECK) {
-				move_pos();
+			if (len_limit < MIN_MATCH_CHECK
+					|| lz->sequence == SEQ_FLUSH) {
+				move_pending();
 				continue;
 			}
 		}
@@ -311,6 +322,8 @@ LZMA_SKIP(LZMA_MATCH_FINDER_NAME_LOWER)
				: 0;
 #endif
 
+		assert(lz->pending == 0);
+
 		const uint8_t *cur = lz->buffer + lz->read_pos;
 
 #ifdef HASH_ARRAY_2