Diffstat (limited to 'linux')
-rw-r--r--  linux/completion.c             |  311
-rw-r--r--  linux/lz4_compress.c           |  170
-rw-r--r--  linux/rhashtable.c             |  338
-rw-r--r--  linux/wait.c                   |  561
-rw-r--r--  linux/zlib_deflate/deflate.c   | 1137
-rw-r--r--  linux/zlib_deflate/deftree.c   | 1113
-rw-r--r--  linux/zlib_deflate/defutil.h   |  327
-rw-r--r--  linux/zlib_inflate/inffast.c   |  363
-rw-r--r--  linux/zlib_inflate/inffast.h   |   11
-rw-r--r--  linux/zlib_inflate/inffixed.h  |   94
-rw-r--r--  linux/zlib_inflate/inflate.c   |  786
-rw-r--r--  linux/zlib_inflate/inflate.h   |  111
-rw-r--r--  linux/zlib_inflate/inftrees.c  |  315
-rw-r--r--  linux/zlib_inflate/inftrees.h  |   59
-rw-r--r--  linux/zlib_inflate/infutil.c   |   49
-rw-r--r--  linux/zlib_inflate/infutil.h   |   25
16 files changed, 164 insertions, 5606 deletions
diff --git a/linux/completion.c b/linux/completion.c
deleted file mode 100644
index fcc77a0..0000000
--- a/linux/completion.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Generic wait-for-completion handler.
- *
- * It differs from semaphores in that the default case is the opposite:
- * wait_for_completion() blocks by default, whereas a semaphore by default
- * does not. The interface also makes it easy to 'complete' multiple
- * waiting threads, something which isn't entirely natural for semaphores.
- *
- * But more importantly, the primitive documents the usage. Semaphores would
- * typically be used for exclusion which gives rise to priority inversion.
- * Waiting for completion is typically a sync point, but not an exclusion point.
- */
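A minimal sketch of the pattern this file implements (hypothetical names; assumes the usual kernel completion API shown below):

	static DECLARE_COMPLETION(setup_done);

	static int worker_fn(void *arg)
	{
		do_setup(arg);			/* hypothetical setup step */
		complete(&setup_done);		/* wake one waiter, in FIFO order */
		return 0;
	}

	static void wait_for_worker(void)
	{
		wait_for_completion(&setup_done);	/* blocks, uninterruptibly */
	}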
-
-#include <linux/sched.h>
-#include <linux/completion.h>
-
-/**
- * complete: - signals a single thread waiting on this completion
- * @x: holds the state of this particular completion
- *
- * This will wake up a single thread waiting on this completion. Threads will be
- * awakened in the same order in which they were queued.
- *
- * See also complete_all(), wait_for_completion() and related routines.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void complete(struct completion *x)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
- __wake_up_locked(&x->wait, TASK_NORMAL, 1);
- spin_unlock_irqrestore(&x->wait.lock, flags);
-}
-EXPORT_SYMBOL(complete);
-
-/**
- * complete_all: - signals all threads waiting on this completion
- * @x: holds the state of this particular completion
- *
- * This will wake up all threads waiting on this particular completion event.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void complete_all(struct completion *x)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&x->wait.lock, flags);
- x->done += UINT_MAX/2;
- __wake_up_locked(&x->wait, TASK_NORMAL, 0);
- spin_unlock_irqrestore(&x->wait.lock, flags);
-}
-EXPORT_SYMBOL(complete_all);
-
-static inline long __sched
-do_wait_for_common(struct completion *x,
- long (*action)(long), long timeout, int state)
-{
- if (!x->done) {
- DECLARE_WAITQUEUE(wait, current);
-
- __add_wait_queue_tail_exclusive(&x->wait, &wait);
- do {
- __set_current_state(state);
- spin_unlock_irq(&x->wait.lock);
- timeout = action(timeout);
- spin_lock_irq(&x->wait.lock);
- } while (!x->done && timeout);
- __remove_wait_queue(&x->wait, &wait);
- if (!x->done)
- return timeout;
- }
- x->done--;
- return timeout ?: 1;
-}
-
-static inline long __sched
-__wait_for_common(struct completion *x,
- long (*action)(long), long timeout, int state)
-{
- might_sleep();
-
- spin_lock_irq(&x->wait.lock);
- timeout = do_wait_for_common(x, action, timeout, state);
- spin_unlock_irq(&x->wait.lock);
- return timeout;
-}
-
-static long __sched
-wait_for_common(struct completion *x, long timeout, int state)
-{
- return __wait_for_common(x, schedule_timeout, timeout, state);
-}
-
-static long __sched
-wait_for_common_io(struct completion *x, long timeout, int state)
-{
- return __wait_for_common(x, io_schedule_timeout, timeout, state);
-}
-
-/**
- * wait_for_completion: - waits for completion of a task
- * @x: holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It is NOT
- * interruptible and there is no timeout.
- *
- * See also similar routines (e.g. wait_for_completion_timeout()) with timeout
- * and interrupt capability. Also see complete().
- */
-void __sched wait_for_completion(struct completion *x)
-{
- wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(wait_for_completion);
-
-/**
- * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
- * @x: holds the state of this particular completion
- * @timeout: timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. The timeout is in jiffies. It is not
- * interruptible.
- *
- * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
- * till timeout) if completed.
- */
-unsigned long __sched
-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
-{
- return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(wait_for_completion_timeout);
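A minimal sketch of how the return convention above is consumed (hypothetical caller; the completion is assumed to be initialized elsewhere):

	static int wait_up_to_one_second(struct completion *done)
	{
		unsigned long left = wait_for_completion_timeout(done, HZ);

		if (!left)
			return -ETIMEDOUT;	/* 0: the timeout expired */
		return 0;			/* >= 1: completed, @left jiffies remained */
	}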
-
-/**
- * wait_for_completion_io: - waits for completion of a task
- * @x: holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It is NOT
- * interruptible and there is no timeout. The caller is accounted as waiting
- * for IO (which traditionally means blkio only).
- */
-void __sched wait_for_completion_io(struct completion *x)
-{
- wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(wait_for_completion_io);
-
-/**
- * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
- * @x: holds the state of this particular completion
- * @timeout: timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. The timeout is in jiffies. It is not
- * interruptible. The caller is accounted as waiting for IO (which traditionally
- * means blkio only).
- *
- * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
- * till timeout) if completed.
- */
-unsigned long __sched
-wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
-{
- return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(wait_for_completion_io_timeout);
-
-/**
- * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
- * @x: holds the state of this particular completion
- *
- * This waits for completion of a specific task to be signaled. It is
- * interruptible.
- *
- * Return: -ERESTARTSYS if interrupted, 0 if completed.
- */
-int __sched wait_for_completion_interruptible(struct completion *x)
-{
- wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
- return 0;
-}
-EXPORT_SYMBOL(wait_for_completion_interruptible);
-
-/**
- * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
- * @x: holds the state of this particular completion
- * @timeout: timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. It is interruptible. The timeout is in jiffies.
- *
- * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
- * or number of jiffies left till timeout) if completed.
- */
-long __sched
-wait_for_completion_interruptible_timeout(struct completion *x,
- unsigned long timeout)
-{
- return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
-}
-EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
-
-/**
- * wait_for_completion_killable: - waits for completion of a task (killable)
- * @x: holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It can be
- * interrupted by a kill signal.
- *
- * Return: -ERESTARTSYS if interrupted, 0 if completed.
- */
-int __sched wait_for_completion_killable(struct completion *x)
-{
- wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
- return 0;
-}
-EXPORT_SYMBOL(wait_for_completion_killable);
-
-/**
- * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
- * @x: holds the state of this particular completion
- * @timeout: timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be
- * signaled or for a specified timeout to expire. It can be
- * interrupted by a kill signal. The timeout is in jiffies.
- *
- * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
- * or number of jiffies left till timeout) if completed.
- */
-long __sched
-wait_for_completion_killable_timeout(struct completion *x,
- unsigned long timeout)
-{
- return wait_for_common(x, timeout, TASK_KILLABLE);
-}
-EXPORT_SYMBOL(wait_for_completion_killable_timeout);
-
-/**
- * try_wait_for_completion - try to decrement a completion without blocking
- * @x: completion structure
- *
- * Return: 0 if a decrement cannot be done without blocking
- * 1 if a decrement succeeded.
- *
- * If a completion is being used as a counting completion,
- * attempt to decrement the counter without blocking. This
- * enables us to avoid waiting if the resource the completion
- * is protecting is not available.
- */
-bool try_wait_for_completion(struct completion *x)
-{
- unsigned long flags;
- int ret = 1;
-
- /*
- * Since x->done will need to be locked only
- * in the non-blocking case, we check x->done
- * first without taking the lock so we can
- * return early in the blocking case.
- */
- if (!READ_ONCE(x->done))
- return 0;
-
- spin_lock_irqsave(&x->wait.lock, flags);
- if (!x->done)
- ret = 0;
- else
- x->done--;
- spin_unlock_irqrestore(&x->wait.lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(try_wait_for_completion);
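A hedged sketch of the non-blocking fast path described above (resource_ready, use_resource() and defer_work() are hypothetical):

	if (try_wait_for_completion(&resource_ready))
		use_resource();		/* counter was nonzero; we consumed one */
	else
		defer_work();		/* a wait would have blocked; back off */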
-
-/**
- * completion_done - Test to see if a completion has any waiters
- * @x: completion structure
- *
- * Return: 0 if there are waiters (wait_for_completion() in progress)
- * 1 if there are no waiters.
- *
- */
-bool completion_done(struct completion *x)
-{
- if (!READ_ONCE(x->done))
- return false;
-
- /*
- * If ->done, we need to wait for complete() to release ->wait.lock
- * otherwise we can end up freeing the completion before complete()
- * is done referencing it.
- *
- * The RMB pairs with complete()'s RELEASE of ->wait.lock and orders
- * the loads of ->done and ->wait.lock such that we cannot observe
- * the lock before complete() acquires it while observing the ->done
- * after it's acquired the lock.
- */
- smp_rmb();
- //spin_unlock_wait(&x->wait.lock);
- spin_lock(&x->wait.lock);
- spin_unlock(&x->wait.lock);
- return true;
-}
-EXPORT_SYMBOL(completion_done);
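One plausible use, sketched under the assumption that a completion should only ever be signalled once (@x->shutdown_done is hypothetical):

	if (!completion_done(&x->shutdown_done))
		complete(&x->shutdown_done);	/* signal at most once */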
diff --git a/linux/lz4_compress.c b/linux/lz4_compress.c
index 65243c7..808fe93 100644
--- a/linux/lz4_compress.c
+++ b/linux/lz4_compress.c
@@ -35,7 +35,6 @@
*/
#include <linux/log2.h>
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/lz4.h>
#include <asm/unaligned.h>
@@ -81,40 +80,36 @@ static inline const u8 *hash_table_add16(const struct lz4_hash_table hash,
return hash.base + offset;
}
-static inline const u8 *try_match(const struct lz4_hash_table hash,
- const u8 *ip)
-{
- const u8 *ref = hash.add(hash, ip);
-
- return ref >= ip - MAX_DISTANCE &&
- A32(ref) == A32(ip) ? ref : NULL;
-}
-
static inline const u8 *find_match(const struct lz4_hash_table hash,
const u8 **ip, const u8 *anchor,
- const u8 *start, const u8 *end)
+ const u8 *start, const u8 *mflimit)
{
-
int findmatchattempts = (1U << SKIPSTRENGTH) + 3;
- const u8 *next_ip = *ip, *ref;
-
- do {
- *ip = next_ip;
- next_ip += findmatchattempts++ >> SKIPSTRENGTH;
-
- if (unlikely(next_ip > end))
- return NULL;
- } while (!(ref = try_match(hash, *ip)));
-
- /* Catch up */
- while (*ip > anchor &&
- ref > start &&
- unlikely((*ip)[-1] == ref[-1])) {
- (*ip)--;
- ref--;
+
+ while (*ip <= mflimit) {
+ const u8 *ref = hash.add(hash, *ip);
+
+ if (ref >= *ip - MAX_DISTANCE && A32(ref) == A32(*ip)) {
+ /* found match: */
+ while (*ip > anchor &&
+ ref > start &&
+ unlikely((*ip)[-1] == ref[-1])) {
+ (*ip)--;
+ ref--;
+ }
+
+ return ref;
+ }
+
+ *ip += findmatchattempts++ >> SKIPSTRENGTH;
}
- return ref;
+ return NULL;
+}
+
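+/*
+ * Worst-case number of bytes needed to encode @length in LZ4's
+ * run-length format: one extension byte per 255, plus a terminator.
+ */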
+static inline int length_len(unsigned length)
+{
+ return length / 255 + 1;
}
/*
@@ -130,102 +125,77 @@ static inline int lz4_compressctx(const struct lz4_hash_table hash,
const u8 *src, size_t src_len,
u8 *dst, size_t *dst_len)
{
- const u8 *ip = src;
- const u8 *anchor = ip, *ref;
+ const u8 *ip = src, *anchor = ip, *ref;
const u8 *const iend = ip + src_len;
const u8 *const mflimit = iend - MFLIMIT;
const u8 *const matchlimit = iend - LASTLITERALS;
- size_t maxoutputsize = *dst_len;
- u8 *op = dst;
- u8 *const oend = op + maxoutputsize;
- int length;
- u8 *token;
+ u8 *op = dst, *token;
+ u8 *const oend = op + *dst_len;
+ size_t literal_len, match_len, match_offset;
/* Init */
- if (src_len < MINLENGTH)
- goto _last_literals;
-
memset(hash.ctx, 0, LZ4_MEM_COMPRESS);
hash.add(hash, ip);
- /* Main Loop */
- while (1) {
- /* Starting a literal: */
- anchor = ip++;
- ref = find_match(hash, &ip, anchor, src, mflimit);
- if (!ref)
- goto _last_literals;
+ /* Always start with a literal: */
+ ip++;
+ while ((ref = find_match(hash, &ip, anchor, src, mflimit))) {
/*
* We found a match; @ip now points to the match and @ref points
* to the prior part of the input we matched with. Everything up
* to @anchor has been encoded; the range from @anchor to @ip
* didn't match and now has to be encoded as a literal:
*/
- length = ip - anchor;
- token = op++;
-
- /* check output limit */
- if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
- (length >> 8) > oend))
- return -(ip - src);
-
- *token = encode_length(&op, length) << ML_BITS;
-
- /* Copy Literals */
- MEMCPY_ADVANCE_CHUNKED(op, anchor, length);
-
- /* Encode matches: */
- while (1) {
- /* Match offset: */
- PUT_LE16_ADVANCE(op, ip - ref);
-
- /* MINMATCH bytes already matched from find_match(): */
- ip += MINMATCH;
- ref += MINMATCH;
-
- length = common_length(ip, ref, matchlimit);
+ literal_len = ip - anchor;
+ match_offset = ip - ref;
- /* Check output limit */
- if (unlikely(op + (1 + LASTLITERALS) +
- (length >> 8) > oend))
- return -(ip - src);
+ /* MINMATCH bytes already matched from find_match(): */
+ ip += MINMATCH;
+ ref += MINMATCH;
+ match_len = common_length(ip, ref, matchlimit);
+ ip += match_len;
- ip += length;
+ /* check output limit */
+ if (unlikely(op +
+ 1 + /* token */
+		     2 + /* match offset */
+ literal_len +
+ length_len(literal_len) +
+ length_len(match_len) +
+ LASTLITERALS > oend))
+ break;
- *token += encode_length(&op, length);
+ token = op++;
+ *token = encode_length(&op, literal_len) << ML_BITS;
+ MEMCPY_ADVANCE_CHUNKED(op, anchor, literal_len);
+ PUT_LE16_ADVANCE(op, match_offset);
+ *token += encode_length(&op, match_len);
- /* Test end of chunk */
- if (ip > mflimit) {
- anchor = ip;
- break;
- }
+ anchor = ip;
+ }
- /* Fill table */
- hash.add(hash, ip - 2);
+ /* Encode remaining input as literal: */
+ literal_len = iend - anchor;
+ if (unlikely(op +
+ 1 +
+ literal_len +
+ length_len(literal_len) > oend)) {
+		/* Return how much we would have been able to fit: */
+ ssize_t remaining = oend - op;
+ ssize_t encoded = anchor - src;
- /* Test next position */
- ref = try_match(hash, ip);
- if (!ref)
- break;
+ remaining -= length_len(remaining) + 1;
- token = op++;
- *token = 0;
- }
+ return -max(encoded + remaining, 1L);
}
-_last_literals:
- /* Encode Last Literals */
- length = iend - anchor;
- if ((op - dst) + length + 1 +
- ((length + 255 - RUN_MASK) / 255) > (u32)maxoutputsize)
- return -(ip - src);
-
token = op++;
- *token = encode_length(&op, length) << ML_BITS;
- MEMCPY_ADVANCE(op, anchor, iend - anchor);
+ *token = encode_length(&op, literal_len) << ML_BITS;
+ MEMCPY_ADVANCE(op, anchor, literal_len);
/* End */
+ BUG_ON(op > oend);
*dst_len = op - dst;
return 0;
}
@@ -252,7 +222,3 @@ int lz4_compress(const unsigned char *src, size_t src_len,
return lz4_compressctx(hash, src, src_len, dst, dst_len);
}
}
-EXPORT_SYMBOL(lz4_compress);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/linux/rhashtable.c b/linux/rhashtable.c
index 035d82a..03369ea 100644
--- a/linux/rhashtable.c
+++ b/linux/rhashtable.c
@@ -25,7 +25,6 @@
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
-#include <linux/export.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4U
@@ -38,36 +37,11 @@ static u32 head_hashfn(struct rhashtable *ht,
return rht_head_hashfn(ht, tbl, he, ht->p);
}
-#ifdef CONFIG_PROVE_LOCKING
-#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
-
-int lockdep_rht_mutex_is_held(struct rhashtable *ht)
-{
- return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
-}
-EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
-
-int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
-{
- spinlock_t *lock = rht_bucket_lock(tbl, hash);
-
- return (debug_locks) ? lockdep_is_held(lock) : 1;
-}
-EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
-#else
-#define ASSERT_RHT_MUTEX(HT)
-#endif
-
-
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
gfp_t gfp)
{
unsigned int i, size;
-#if defined(CONFIG_PROVE_LOCKING)
- unsigned int nr_pcpus = 2;
-#else
unsigned int nr_pcpus = num_possible_cpus();
-#endif
nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
@@ -77,11 +51,6 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
if (sizeof(spinlock_t) != 0) {
tbl->locks = NULL;
-#ifdef CONFIG_NUMA
- if (size * sizeof(spinlock_t) > PAGE_SIZE &&
- gfp == GFP_KERNEL)
- tbl->locks = vmalloc(size * sizeof(spinlock_t));
-#endif
if (gfp != GFP_KERNEL)
gfp |= __GFP_NOWARN | __GFP_NORETRY;
@@ -270,28 +239,11 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
-/**
- * rhashtable_expand - Expand hash table while allowing concurrent lookups
- * @ht: the hash table to expand
- *
- * A secondary bucket array is allocated and the hash entries are migrated.
- *
- * This function may only be called in a context where it is safe to call
- * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
- *
- * The caller must ensure that no concurrent resizing occurs by holding
- * ht->mutex.
- *
- * It is valid to have concurrent insertions and deletions protected by per
- * bucket locks or concurrent RCU protected lookups and traversals.
- */
static int rhashtable_expand(struct rhashtable *ht)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
int err;
- ASSERT_RHT_MUTEX(ht);
-
old_tbl = rhashtable_last_table(ht, old_tbl);
new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
@@ -305,22 +257,6 @@ static int rhashtable_expand(struct rhashtable *ht)
return err;
}
-/**
- * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
- * @ht: the hash table to shrink
- *
- * This function shrinks the hash table to fit, i.e., to the smallest
- * size that would not cause it to expand right away automatically.
- *
- * The caller must ensure that no concurrent resizing occurs by holding
- * ht->mutex.
- *
- * The caller must ensure that no concurrent table mutations take place.
- * It is however valid to have concurrent lookups if they are RCU protected.
- *
- * It is valid to have concurrent insertions and deletions protected by per
- * bucket locks or concurrent RCU protected lookups and traversals.
- */
static int rhashtable_shrink(struct rhashtable *ht)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
@@ -328,8 +264,6 @@ static int rhashtable_shrink(struct rhashtable *ht)
unsigned int size = 0;
int err;
- ASSERT_RHT_MUTEX(ht);
-
if (nelems)
size = roundup_pow_of_two(nelems * 3 / 2);
if (size < ht->p.min_size)
@@ -438,7 +372,6 @@ fail:
return err;
}
-EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
const void *key,
@@ -486,191 +419,6 @@ exit:
else
return ERR_PTR(err);
}
-EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
-
-/**
- * rhashtable_walk_init - Initialise an iterator
- * @ht: Table to walk over
- * @iter: Hash table Iterator
- * @gfp: GFP flags for allocations
- *
- * This function prepares a hash table walk.
- *
- * Note that if you restart a walk after rhashtable_walk_stop you
- * may see the same object twice. Also, you may miss objects if
- * there are removals in between rhashtable_walk_stop and the next
- * call to rhashtable_walk_start.
- *
- * For a completely stable walk you should construct your own data
- * structure outside the hash table.
- *
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
- *
- * You must call rhashtable_walk_exit if this function returns
- * successfully.
- */
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
- gfp_t gfp)
-{
- iter->ht = ht;
- iter->p = NULL;
- iter->slot = 0;
- iter->skip = 0;
-
- iter->walker = kmalloc(sizeof(*iter->walker), gfp);
- if (!iter->walker)
- return -ENOMEM;
-
- spin_lock(&ht->lock);
- iter->walker->tbl =
- rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
- list_add(&iter->walker->list, &iter->walker->tbl->walkers);
- spin_unlock(&ht->lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rhashtable_walk_init);
-
-/**
- * rhashtable_walk_exit - Free an iterator
- * @iter: Hash table Iterator
- *
- * This function frees resources allocated by rhashtable_walk_init.
- */
-void rhashtable_walk_exit(struct rhashtable_iter *iter)
-{
- spin_lock(&iter->ht->lock);
- if (iter->walker->tbl)
- list_del(&iter->walker->list);
- spin_unlock(&iter->ht->lock);
- kfree(iter->walker);
-}
-EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
-
-/**
- * rhashtable_walk_start - Start a hash table walk
- * @iter: Hash table iterator
- *
- * Start a hash table walk. Note that we take the RCU lock in all
- * cases including when we return an error. So you must always call
- * rhashtable_walk_stop to clean up.
- *
- * Returns zero if successful.
- *
- * Returns -EAGAIN if a resize event occurred. Note that the iterator
- * will rewind back to the beginning and you may use it immediately
- * by calling rhashtable_walk_next.
- */
-int rhashtable_walk_start(struct rhashtable_iter *iter)
- __acquires(RCU)
-{
- struct rhashtable *ht = iter->ht;
-
- rcu_read_lock();
-
- spin_lock(&ht->lock);
- if (iter->walker->tbl)
- list_del(&iter->walker->list);
- spin_unlock(&ht->lock);
-
- if (!iter->walker->tbl) {
- iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
- return -EAGAIN;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rhashtable_walk_start);
-
-/**
- * rhashtable_walk_next - Return the next object and advance the iterator
- * @iter: Hash table iterator
- *
- * Note that you must call rhashtable_walk_stop when you are finished
- * with the walk.
- *
- * Returns the next object or NULL when the end of the table is reached.
- *
- * Returns -EAGAIN if a resize event occurred. Note that the iterator
- * will rewind back to the beginning and you may continue to use it.
- */
-void *rhashtable_walk_next(struct rhashtable_iter *iter)
-{
- struct bucket_table *tbl = iter->walker->tbl;
- struct rhashtable *ht = iter->ht;
- struct rhash_head *p = iter->p;
-
- if (p) {
- p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
- goto next;
- }
-
- for (; iter->slot < tbl->size; iter->slot++) {
- int skip = iter->skip;
-
- rht_for_each_rcu(p, tbl, iter->slot) {
- if (!skip)
- break;
- skip--;
- }
-
-next:
- if (!rht_is_a_nulls(p)) {
- iter->skip++;
- iter->p = p;
- return rht_obj(ht, p);
- }
-
- iter->skip = 0;
- }
-
- iter->p = NULL;
-
- /* Ensure we see any new tables. */
- smp_rmb();
-
- iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (iter->walker->tbl) {
- iter->slot = 0;
- iter->skip = 0;
- return ERR_PTR(-EAGAIN);
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(rhashtable_walk_next);
-
-/**
- * rhashtable_walk_stop - Finish a hash table walk
- * @iter: Hash table iterator
- *
- * Finish a hash table walk.
- */
-void rhashtable_walk_stop(struct rhashtable_iter *iter)
- __releases(RCU)
-{
- struct rhashtable *ht;
- struct bucket_table *tbl = iter->walker->tbl;
-
- if (!tbl)
- goto out;
-
- ht = iter->ht;
-
- spin_lock(&ht->lock);
- if (tbl->rehash < tbl->size)
- list_add(&iter->walker->list, &tbl->walkers);
- else
- iter->walker->tbl = NULL;
- spin_unlock(&ht->lock);
-
- iter->p = NULL;
-
-out:
- rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
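A sketch of the walk protocol the comments above describe (hypothetical object type; error handling elided):

	struct rhashtable_iter iter;
	struct test_obj *obj;

	if (rhashtable_walk_init(ht, &iter, GFP_KERNEL))
		return;

	rhashtable_walk_start(&iter);	/* -EAGAIN here just rewinds the iterator */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;	/* -EAGAIN: a resize happened, keep going */
		/* ... use obj, under RCU ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);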
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
@@ -683,49 +431,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
return jhash2(key, length, seed);
}
-/**
- * rhashtable_init - initialize a new hash table
- * @ht: hash table to be initialized
- * @params: configuration parameters
- *
- * Initializes a new hash table based on the provided configuration
- * parameters. A table can be configured either with a variable or
- * fixed length key:
- *
- * Configuration Example 1: Fixed length keys
- * struct test_obj {
- * int key;
- * void * my_member;
- * struct rhash_head node;
- * };
- *
- * struct rhashtable_params params = {
- * .head_offset = offsetof(struct test_obj, node),
- * .key_offset = offsetof(struct test_obj, key),
- * .key_len = sizeof(int),
- * .hashfn = jhash,
- * .nulls_base = (1U << RHT_BASE_SHIFT),
- * };
- *
- * Configuration Example 2: Variable length keys
- * struct test_obj {
- * [...]
- * struct rhash_head node;
- * };
- *
- * u32 my_hash_fn(const void *data, u32 len, u32 seed)
- * {
- * struct test_obj *obj = data;
- *
- * return [... hash ...];
- * }
- *
- * struct rhashtable_params params = {
- * .head_offset = offsetof(struct test_obj, node),
- * .hashfn = jhash,
- * .obj_hashfn = my_hash_fn,
- * };
- */
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params)
{
@@ -805,56 +510,15 @@ int rhashtable_init(struct rhashtable *ht,
return 0;
}
-EXPORT_SYMBOL_GPL(rhashtable_init);
-/**
- * rhashtable_free_and_destroy - free elements and destroy hash table
- * @ht: the hash table to destroy
- * @free_fn: callback to release resources of element
- * @arg: pointer passed to free_fn
- *
- * Stops any pending async resize. If defined, invokes free_fn for each
- * element to release its resources. Please note that RCU protected
- * readers may still be accessing the elements. Releasing of resources
- * must occur in a compatible manner. Then frees the bucket array.
- *
- * This function will eventually sleep to wait for an async resize
- * to complete. The caller must ensure that no further write operations
- * occur in parallel.
- */
-void rhashtable_free_and_destroy(struct rhashtable *ht,
- void (*free_fn)(void *ptr, void *arg),
- void *arg)
+void rhashtable_destroy(struct rhashtable *ht)
{
struct bucket_table *tbl;
- unsigned int i;
cancel_work_sync(&ht->run_work);
mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
- if (free_fn) {
- for (i = 0; i < tbl->size; i++) {
- struct rhash_head *pos, *next;
-
- for (pos = rht_dereference(tbl->buckets[i], ht),
- next = !rht_is_a_nulls(pos) ?
- rht_dereference(pos->next, ht) : NULL;
- !rht_is_a_nulls(pos);
- pos = next,
- next = !rht_is_a_nulls(pos) ?
- rht_dereference(pos->next, ht) : NULL)
- free_fn(rht_obj(ht, pos), arg);
- }
- }
-
bucket_table_free(tbl);
mutex_unlock(&ht->mutex);
}
-EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
-
-void rhashtable_destroy(struct rhashtable *ht)
-{
- return rhashtable_free_and_destroy(ht, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(rhashtable_destroy);
diff --git a/linux/wait.c b/linux/wait.c
index 83f4e85..991875c 100644
--- a/linux/wait.c
+++ b/linux/wait.c
@@ -3,66 +3,42 @@
*
* (C) 2004 Nadia Yvette Chambers, Oracle
*/
-#include <linux/export.h>
+
+#include <linux/completion.h>
#include <linux/sched.h>
-#include <linux/mm.h>
#include <linux/wait.h>
-#include <linux/hash.h>
-#include <linux/kthread.h>
-void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
+static inline int waitqueue_active(wait_queue_head_t *q)
{
- spin_lock_init(&q->lock);
- lockdep_set_class_and_name(&q->lock, key, name);
- INIT_LIST_HEAD(&q->task_list);
+ return !list_empty(&q->task_list);
}
-EXPORT_SYMBOL(__init_waitqueue_head);
-
-void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
- unsigned long flags;
-
- wait->flags &= ~WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&q->lock, flags);
- __add_wait_queue(q, wait);
- spin_unlock_irqrestore(&q->lock, flags);
+ list_add(&new->task_list, &head->task_list);
}
-EXPORT_SYMBOL(add_wait_queue);
-void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+static inline void __add_wait_queue_tail(wait_queue_head_t *head,
+ wait_queue_t *new)
{
- unsigned long flags;
+ list_add_tail(&new->task_list, &head->task_list);
+}
+static inline void
+__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+{
wait->flags |= WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&q->lock, flags);
__add_wait_queue_tail(q, wait);
- spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(add_wait_queue_exclusive);
-void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+static inline void
+__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
- unsigned long flags;
-
- spin_lock_irqsave(&q->lock, flags);
- __remove_wait_queue(q, wait);
- spin_unlock_irqrestore(&q->lock, flags);
+ list_del(&old->task_list);
}
-EXPORT_SYMBOL(remove_wait_queue);
-
-/*
- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
- * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
- * number) then we wake all the non-exclusive tasks and one exclusive task.
- *
- * There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
- * zero in this (rare) case, and we handle it by continuing to scan the queue.
- */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, int wake_flags, void *key)
+ int nr_exclusive, int wake_flags, void *key)
{
wait_queue_t *curr, *next;
@@ -75,18 +51,8 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
}
}
-/**
- * __wake_up - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: is directly passed to the wakeup function
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void __wake_up(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, void *key)
+static void __wake_up(wait_queue_head_t *q, unsigned int mode,
+ int nr_exclusive, void *key)
{
unsigned long flags;
@@ -94,79 +60,17 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode,
__wake_up_common(q, mode, nr_exclusive, 0, key);
spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(__wake_up);
-/*
- * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
- */
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
+void wake_up(wait_queue_head_t *q)
{
- __wake_up_common(q, mode, nr, 0, NULL);
+ __wake_up(q, TASK_NORMAL, 1, NULL);
}
-EXPORT_SYMBOL_GPL(__wake_up_locked);
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+static void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
- __wake_up_common(q, mode, 1, 0, key);
-}
-EXPORT_SYMBOL_GPL(__wake_up_locked_key);
-
-/**
- * __wake_up_sync_key - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: opaque value to be passed to wakeup targets
- *
- * The sync wakeup differs in that the waker knows that it will schedule
- * away soon, so while the target thread will be woken up, it will not
- * be migrated to another CPU - ie. the two threads are 'synchronized'
- * with each other. This can prevent needless bouncing between CPUs.
- *
- * On UP it can prevent extra preemption.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, void *key)
-{
- unsigned long flags;
- int wake_flags = 1; /* XXX WF_SYNC */
-
- if (unlikely(!q))
- return;
-
- if (unlikely(nr_exclusive != 1))
- wake_flags = 0;
-
- spin_lock_irqsave(&q->lock, flags);
- __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL_GPL(__wake_up_sync_key);
-
-/*
- * __wake_up_sync - see __wake_up_sync_key()
- */
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
-{
- __wake_up_sync_key(q, mode, nr_exclusive, NULL);
+ __wake_up_common(q, mode, nr, 0, NULL);
}
-EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
-/*
- * Note: we use "set_current_state()" _after_ the wait-queue add,
- * because we need a memory barrier there on SMP, so that any
- * wake-function that tests for the wait-queue being active
- * will be guaranteed to see waitqueue addition _or_ subsequent
- * tests in this thread will see the wakeup having taken place.
- *
- * The spin_unlock() itself is semi-permeable and only protects
- * one way (it only protects stuff inside the critical region and
- * stops them from bleeding out - it would still allow subsequent
- * loads to move into the critical region).
- */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
@@ -179,9 +83,8 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(prepare_to_wait);
-void
+static void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
@@ -193,38 +96,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(prepare_to_wait_exclusive);
-
-long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
-{
- unsigned long flags;
-
- wait->private = current;
- wait->func = autoremove_wake_function;
-
- spin_lock_irqsave(&q->lock, flags);
- if (list_empty(&wait->task_list)) {
- if (wait->flags & WQ_FLAG_EXCLUSIVE)
- __add_wait_queue_tail(q, wait);
- else
- __add_wait_queue(q, wait);
- }
- set_current_state(state);
- spin_unlock_irqrestore(&q->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(prepare_to_wait_event);
-/**
- * finish_wait - clean up after waiting in a queue
- * @q: waitqueue waited on
- * @wait: wait descriptor
- *
- * Sets current thread back to running state and removes
- * the wait descriptor from the given waitqueue if still
- * queued.
- */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
@@ -249,40 +121,6 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
spin_unlock_irqrestore(&q->lock, flags);
}
}
-EXPORT_SYMBOL(finish_wait);
-
-/**
- * abort_exclusive_wait - abort exclusive waiting in a queue
- * @q: waitqueue waited on
- * @wait: wait descriptor
- * @mode: runstate of the waiter to be woken
- * @key: key to identify a wait bit queue or %NULL
- *
- * Sets current thread back to running state and removes
- * the wait descriptor from the given waitqueue if still
- * queued.
- *
- * Wakes up the next waiter if the caller is concurrently
- * woken up through the queue.
- *
- * This prevents waiter starvation where an exclusive waiter
- * aborts and is woken up concurrently and no one wakes up
- * the next waiter.
- */
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
- unsigned int mode, void *key)
-{
- unsigned long flags;
-
- __set_current_state(TASK_RUNNING);
- spin_lock_irqsave(&q->lock, flags);
- if (!list_empty(&wait->task_list))
- list_del_init(&wait->task_list);
- else if (waitqueue_active(q))
- __wake_up_locked_key(q, mode, key);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(abort_exclusive_wait);
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
void *key)
@@ -298,319 +136,110 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *
list_del_init(&wait->task_list);
return ret;
}
-EXPORT_SYMBOL(autoremove_wake_function);
-static inline bool is_kthread_should_stop(void)
-{
- return (current->flags & PF_KTHREAD) && kthread_should_stop();
-}
+struct wait_bit_key {
+ void *flags;
+ int bit_nr;
+ unsigned long timeout;
+};
-/*
- * DEFINE_WAIT_FUNC(wait, woken_wake_func);
- *
- * add_wait_queue(&wq, &wait);
- * for (;;) {
- * if (condition)
- * break;
- *
- * p->state = mode; condition = true;
- * smp_mb(); // A smp_wmb(); // C
- * if (!(wait->flags & WQ_FLAG_WOKEN)) wait->flags |= WQ_FLAG_WOKEN;
- * schedule() try_to_wake_up();
- * p->state = TASK_RUNNING; ~~~~~~~~~~~~~~~~~~
- * wait->flags &= ~WQ_FLAG_WOKEN; condition = true;
- * smp_mb() // B smp_wmb(); // C
- * wait->flags |= WQ_FLAG_WOKEN;
- * }
- * remove_wait_queue(&wq, &wait);
- *
- */
-long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
-{
- set_current_state(mode); /* A */
- /*
- * The above implies an smp_mb(), which matches with the smp_wmb() from
- * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
- * also observe all state before the wakeup.
- */
- if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
- timeout = schedule_timeout(timeout);
- __set_current_state(TASK_RUNNING);
-
- /*
- * The below implies an smp_mb(), it too pairs with the smp_wmb() from
- * woken_wake_function() such that we must either observe the wait
- * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
- * an event.
- */
- smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
+struct wait_bit_queue {
+ struct wait_bit_key key;
+ wait_queue_t wait;
+};
- return timeout;
-}
-EXPORT_SYMBOL(wait_woken);
-
-int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
- /*
- * Although this function is called under waitqueue lock, LOCK
- * doesn't imply write barrier and the users expects write
- * barrier semantics on wakeup functions. The following
- * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
- * and is paired with smp_store_mb() in wait_woken().
- */
- smp_wmb(); /* C */
- wait->flags |= WQ_FLAG_WOKEN;
+ struct wait_bit_key *key = arg;
+ struct wait_bit_queue *wait_bit =
+ container_of(wait, struct wait_bit_queue, wait);
- return default_wake_function(wait, mode, sync, key);
+ return (wait_bit->key.flags == key->flags &&
+ wait_bit->key.bit_nr == key->bit_nr &&
+ !test_bit(key->bit_nr, key->flags))
+ ? autoremove_wake_function(wait, mode, sync, key) : 0;
}
-EXPORT_SYMBOL(woken_wake_function);
-int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
-{
- struct wait_bit_key *key = arg;
- struct wait_bit_queue *wait_bit
- = container_of(wait, struct wait_bit_queue, wait);
-
- if (wait_bit->key.flags != key->flags ||
- wait_bit->key.bit_nr != key->bit_nr ||
- test_bit(key->bit_nr, key->flags))
- return 0;
- else
- return autoremove_wake_function(wait, mode, sync, key);
-}
-EXPORT_SYMBOL(wake_bit_function);
+static DECLARE_WAIT_QUEUE_HEAD(bit_wq);
-/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking)
- * waiting, the action callbacks passed to __wait_on_bit() and
- * __wait_on_bit_lock() may return codes; a nonzero return code halts
- * waiting and is returned to the caller.
- */
-int __sched
-__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
- wait_bit_action_f *action, unsigned mode)
-{
- int ret = 0;
+#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
+ { .flags = word, .bit_nr = bit, }
- do {
- prepare_to_wait(wq, &q->wait, mode);
- if (test_bit(q->key.bit_nr, q->key.flags))
- ret = (*action)(&q->key, mode);
- } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
- finish_wait(wq, &q->wait);
- return ret;
-}
-EXPORT_SYMBOL(__wait_on_bit);
+#define DEFINE_WAIT_BIT(name, word, bit) \
+ struct wait_bit_queue name = { \
+ .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
+ .wait = { \
+ .private = current, \
+ .func = wake_bit_function, \
+ .task_list = \
+ LIST_HEAD_INIT((name).wait.task_list), \
+ }, \
+ }
-int __sched out_of_line_wait_on_bit(void *word, int bit,
- wait_bit_action_f *action, unsigned mode)
+void wake_up_bit(void *word, int bit)
{
- wait_queue_head_t *wq = bit_waitqueue(word, bit);
- DEFINE_WAIT_BIT(wait, word, bit);
+ struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
- return __wait_on_bit(wq, &wait, action, mode);
+ if (waitqueue_active(&bit_wq))
+ __wake_up(&bit_wq, TASK_NORMAL, 1, &key);
}
-EXPORT_SYMBOL(out_of_line_wait_on_bit);
-int __sched out_of_line_wait_on_bit_timeout(
- void *word, int bit, wait_bit_action_f *action,
- unsigned mode, unsigned long timeout)
+void __wait_on_bit(void *word, int bit, unsigned mode)
{
- wait_queue_head_t *wq = bit_waitqueue(word, bit);
DEFINE_WAIT_BIT(wait, word, bit);
- wait.key.timeout = jiffies + timeout;
- return __wait_on_bit(wq, &wait, action, mode);
-}
-EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
-
-int __sched
-__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
- wait_bit_action_f *action, unsigned mode)
-{
do {
- int ret;
+ prepare_to_wait(&bit_wq, &wait.wait, mode);
+ if (test_bit(wait.key.bit_nr, wait.key.flags))
+ schedule();
+ } while (test_bit(wait.key.bit_nr, wait.key.flags));
- prepare_to_wait_exclusive(wq, &q->wait, mode);
- if (!test_bit(q->key.bit_nr, q->key.flags))
- continue;
- ret = action(&q->key, mode);
- if (!ret)
- continue;
- abort_exclusive_wait(wq, &q->wait, mode, &q->key);
- return ret;
- } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
- finish_wait(wq, &q->wait);
- return 0;
+ finish_wait(&bit_wq, &wait.wait);
}
-EXPORT_SYMBOL(__wait_on_bit_lock);
-int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
- wait_bit_action_f *action, unsigned mode)
+void __wait_on_bit_lock(void *word, int bit, unsigned mode)
{
- wait_queue_head_t *wq = bit_waitqueue(word, bit);
DEFINE_WAIT_BIT(wait, word, bit);
- return __wait_on_bit_lock(wq, &wait, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
-
-void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
-{
- struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
- if (waitqueue_active(wq))
- __wake_up(wq, TASK_NORMAL, 1, &key);
-}
-EXPORT_SYMBOL(__wake_up_bit);
-
-/**
- * wake_up_bit - wake up a waiter on a bit
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that wakes up waiters
- * on a bit. For instance, if one were to have waiters on a bitflag,
- * one would call wake_up_bit() after clearing the bit.
- *
- * In order for this to function properly, as it uses waitqueue_active()
- * internally, some kind of memory barrier must be done prior to calling
- * this. Typically, this will be smp_mb__after_atomic(), but in some
- * cases where bitflags are manipulated non-atomically under a lock, one
- * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
- * because spin_unlock() does not guarantee a memory barrier.
- */
-void wake_up_bit(void *word, int bit)
-{
- __wake_up_bit(bit_waitqueue(word, bit), word, bit);
-}
-EXPORT_SYMBOL(wake_up_bit);
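A sketch of the waker-side discipline this comment asks for (@obj->flags and MY_BIT are hypothetical):

	clear_bit(MY_BIT, &obj->flags);
	smp_mb__after_atomic();	/* order clear_bit() before waitqueue_active() */
	wake_up_bit(&obj->flags, MY_BIT);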
-
-static DECLARE_WAIT_QUEUE_HEAD(__bit_waitqueue);
-
-wait_queue_head_t *bit_waitqueue(void *word, int bit)
-{
- return &__bit_waitqueue;
-}
-EXPORT_SYMBOL(bit_waitqueue);
-
-/*
- * Manipulate the atomic_t address to produce a better bit waitqueue table hash
- * index (we're keying off bit -1, but that would produce a horrible hash
- * value).
- */
-static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
-{
- if (BITS_PER_LONG == 64) {
- unsigned long q = (unsigned long)p;
- return bit_waitqueue((void *)(q & ~1), q & 1);
- }
- return bit_waitqueue(p, 0);
-}
-
-static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
- void *arg)
-{
- struct wait_bit_key *key = arg;
- struct wait_bit_queue *wait_bit
- = container_of(wait, struct wait_bit_queue, wait);
- atomic_t *val = key->flags;
-
- if (wait_bit->key.flags != key->flags ||
- wait_bit->key.bit_nr != key->bit_nr ||
- atomic_read(val) != 0)
- return 0;
- return autoremove_wake_function(wait, mode, sync, key);
-}
-
-/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
- * the action callback passed to __wait_on_atomic_t() may return a code; a
- * nonzero return code halts waiting and is returned to the caller.
- */
-static __sched
-int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
- int (*action)(atomic_t *), unsigned mode)
-{
- atomic_t *val;
- int ret = 0;
-
do {
- prepare_to_wait(wq, &q->wait, mode);
- val = q->key.flags;
- if (atomic_read(val) == 0)
- break;
- ret = (*action)(val);
- } while (!ret && atomic_read(val) != 0);
- finish_wait(wq, &q->wait);
- return ret;
+ prepare_to_wait_exclusive(&bit_wq, &wait.wait, mode);
+ if (!test_bit(wait.key.bit_nr, wait.key.flags))
+ continue;
+ schedule();
+ } while (test_and_set_bit(wait.key.bit_nr, wait.key.flags));
+ finish_wait(&bit_wq, &wait.wait);
}
-#define DEFINE_WAIT_ATOMIC_T(name, p) \
- struct wait_bit_queue name = { \
- .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p), \
- .wait = { \
- .private = current, \
- .func = wake_atomic_t_function, \
- .task_list = \
- LIST_HEAD_INIT((name).wait.task_list), \
- }, \
- }
-
-__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
- unsigned mode)
+void complete(struct completion *x)
{
- wait_queue_head_t *wq = atomic_t_waitqueue(p);
- DEFINE_WAIT_ATOMIC_T(wait, p);
+ unsigned long flags;
- return __wait_on_atomic_t(wq, &wait, action, mode);
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+ __wake_up_locked(&x->wait, TASK_NORMAL, 1);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
}
-EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
-/**
- * wake_up_atomic_t - Wake up a waiter on a atomic_t
- * @p: The atomic_t being waited on, a kernel virtual address
- *
- * Wake up anyone waiting for the atomic_t to go to zero.
- *
- * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
- * check is done by the waiter's wake function, not by the waker itself).
- */
-void wake_up_atomic_t(atomic_t *p)
+void wait_for_completion(struct completion *x)
{
- __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
-}
-EXPORT_SYMBOL(wake_up_atomic_t);
+ spin_lock_irq(&x->wait.lock);
-__sched int bit_wait(struct wait_bit_key *word, int mode)
-{
- schedule();
- return 0;
-}
-EXPORT_SYMBOL(bit_wait);
-
-__sched int bit_wait_io(struct wait_bit_key *word, int mode)
-{
- io_schedule();
- return 0;
-}
-EXPORT_SYMBOL(bit_wait_io);
+ if (!x->done) {
+ DECLARE_WAITQUEUE(wait, current);
-__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
-{
- unsigned long now = jiffies;
- if (time_after_eq(now, word->timeout))
- return -EAGAIN;
- schedule_timeout(word->timeout - now);
- return 0;
-}
-EXPORT_SYMBOL_GPL(bit_wait_timeout);
+ __add_wait_queue_tail_exclusive(&x->wait, &wait);
+ do {
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&x->wait.lock);
-__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
-{
- unsigned long now = jiffies;
- if (time_after_eq(now, word->timeout))
- return -EAGAIN;
- io_schedule_timeout(word->timeout - now);
- return 0;
+ schedule();
+ spin_lock_irq(&x->wait.lock);
+ } while (!x->done);
+ __remove_wait_queue(&x->wait, &wait);
+ if (!x->done)
+ goto out;
+ }
+ x->done--;
+out:
+ spin_unlock_irq(&x->wait.lock);
}
-EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
diff --git a/linux/zlib_deflate/deflate.c b/linux/zlib_deflate/deflate.c
deleted file mode 100644
index d20ef45..0000000
--- a/linux/zlib_deflate/deflate.c
+++ /dev/null
@@ -1,1137 +0,0 @@
-/* +++ deflate.c */
-/* deflate.c -- compress data using the deflation algorithm
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/*
- * ALGORITHM
- *
- * The "deflation" process depends on being able to identify portions
- * of the input text which are identical to earlier input (within a
- * sliding window trailing behind the input currently being processed).
- *
- * The most straightforward technique turns out to be the fastest for
- * most input files: try all possible matches and select the longest.
- * The key feature of this algorithm is that insertions into the string
- * dictionary are very simple and thus fast, and deletions are avoided
- * completely. Insertions are performed at each input character, whereas
- * string matches are performed only when the previous match ends. So it
- * is preferable to spend more time in matches to allow very fast string
- * insertions and avoid deletions. The matching algorithm for small
- * strings is inspired by that of Rabin & Karp. A brute force approach
- * is used to find longer strings when a small match has been found.
- * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
- * (by Leonid Broukhis).
- * A previous version of this file used a more sophisticated algorithm
- * (by Fiala and Greene) which is guaranteed to run in linear amortized
- * time, but has a larger average cost, uses more memory and is patented.
- * However the F&G algorithm may be faster for some highly redundant
- * files if the parameter max_chain_length (described below) is too large.
- *
- * ACKNOWLEDGEMENTS
- *
- * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
- * I found it in 'freeze' written by Leonid Broukhis.
- * Thanks to many people for bug reports and testing.
- *
- * REFERENCES
- *
- * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
- * Available in ftp://ds.internic.net/rfc/rfc1951.txt
- *
- * A description of the Rabin and Karp algorithm is given in the book
- * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
- *
- * Fiala,E.R., and Greene,D.H.
- * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
- *
- */
-
-#include <linux/module.h>
-#include <linux/zutil.h>
-#include "defutil.h"
-
-
-/* ===========================================================================
- * Function prototypes.
- */
-typedef enum {
- need_more, /* block not completed, need more input or more output */
- block_done, /* block flush performed */
- finish_started, /* finish started, need only more output at next deflate */
- finish_done /* finish done, accept no more input or output */
-} block_state;
-
-typedef block_state (*compress_func) (deflate_state *s, int flush);
-/* Compression function. Returns the block state after the call. */
-
-static void fill_window (deflate_state *s);
-static block_state deflate_stored (deflate_state *s, int flush);
-static block_state deflate_fast (deflate_state *s, int flush);
-static block_state deflate_slow (deflate_state *s, int flush);
-static void lm_init (deflate_state *s);
-static void putShortMSB (deflate_state *s, uInt b);
-static void flush_pending (z_streamp strm);
-static int read_buf (z_streamp strm, Byte *buf, unsigned size);
-static uInt longest_match (deflate_state *s, IPos cur_match);
-
-#ifdef DEBUG_ZLIB
-static void check_match (deflate_state *s, IPos start, IPos match,
- int length);
-#endif
-
-/* ===========================================================================
- * Local data
- */
-
-#define NIL 0
-/* Tail of hash chains */
-
-#ifndef TOO_FAR
-# define TOO_FAR 4096
-#endif
-/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
-
-#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
-/* Minimum amount of lookahead, except at the end of the input file.
- * See deflate.c for comments about the MIN_MATCH+1.
- */
-
-/* Values for max_lazy_match, good_match and max_chain_length, depending on
- * the desired pack level (0..9). The values given below have been tuned to
- * exclude worst case performance for pathological files. Better values may be
- * found for specific files.
- */
-typedef struct config_s {
- ush good_length; /* reduce lazy search above this match length */
- ush max_lazy; /* do not perform lazy search above this match length */
- ush nice_length; /* quit search above this match length */
- ush max_chain;
- compress_func func;
-} config;
-
-static const config configuration_table[10] = {
-/* good lazy nice chain */
-/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
-/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
-/* 2 */ {4, 5, 16, 8, deflate_fast},
-/* 3 */ {4, 6, 32, 32, deflate_fast},
-
-/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
-/* 5 */ {8, 16, 32, 32, deflate_slow},
-/* 6 */ {8, 16, 128, 128, deflate_slow},
-/* 7 */ {8, 32, 128, 256, deflate_slow},
-/* 8 */ {32, 128, 258, 1024, deflate_slow},
-/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
-
-/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
- * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
- * meaning.
- */
-
-#define EQUAL 0
-/* result of memcmp for equal strings */
-
-/* ===========================================================================
- * Update a hash value with the given input byte
- * IN assertion: all calls to UPDATE_HASH are made with consecutive
- * input characters, so that a running hash key can be computed from the
- * previous key instead of complete recalculation each time.
- */
-#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
-
-
-/* ===========================================================================
- * Insert string str in the dictionary and set match_head to the previous head
- * of the hash chain (the most recent string with same hash key). Return
- * the previous length of the hash chain.
- * IN assertion: all calls to INSERT_STRING are made with consecutive
- * input characters and the first MIN_MATCH bytes of str are valid
- * (except for the last MIN_MATCH-1 bytes of the input file).
- */
-#define INSERT_STRING(s, str, match_head) \
- (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
- s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
- s->head[s->ins_h] = (Pos)(str))
-
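To illustrate the running-hash property described above (a hypothetical fragment; h starts at zero and MIN_MATCH is 3, so after three consecutive calls h depends on exactly the last three bytes):

	uInt h = 0;
	UPDATE_HASH(s, h, s->window[0]);
	UPDATE_HASH(s, h, s->window[1]);
	UPDATE_HASH(s, h, s->window[2]);	/* h now keys the string window[0..2] */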
-/* ===========================================================================
- * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
- * prev[] will be initialized on the fly.
- */
-#define CLEAR_HASH(s) \
- s->head[s->hash_size-1] = NIL; \
- memset((char *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head));
-
-/* ========================================================================= */
-int zlib_deflateInit2(
- z_streamp strm,
- int level,
- int method,
- int windowBits,
- int memLevel,
- int strategy
-)
-{
- deflate_state *s;
- int noheader = 0;
- deflate_workspace *mem;
- char *next;
-
- ush *overlay;
- /* We overlay pending_buf and d_buf+l_buf. This works since the average
- * output size for (length,distance) codes is <= 24 bits.
- */
-
- if (strm == NULL) return Z_STREAM_ERROR;
-
- strm->msg = NULL;
-
- if (level == Z_DEFAULT_COMPRESSION) level = 6;
-
- mem = (deflate_workspace *) strm->workspace;
-
- if (windowBits < 0) { /* undocumented feature: suppress zlib header */
- noheader = 1;
- windowBits = -windowBits;
- }
- if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
- windowBits < 9 || windowBits > 15 || level < 0 || level > 9 ||
- strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
- return Z_STREAM_ERROR;
- }
-
- /*
- * Direct the workspace's pointers to the chunks that were allocated
- * along with the deflate_workspace struct.
- */
- next = (char *) mem;
- next += sizeof(*mem);
- mem->window_memory = (Byte *) next;
- next += zlib_deflate_window_memsize(windowBits);
- mem->prev_memory = (Pos *) next;
- next += zlib_deflate_prev_memsize(windowBits);
- mem->head_memory = (Pos *) next;
- next += zlib_deflate_head_memsize(memLevel);
- mem->overlay_memory = next;
-
- s = (deflate_state *) &(mem->deflate_memory);
- strm->state = (struct internal_state *)s;
- s->strm = strm;
-
- s->noheader = noheader;
- s->w_bits = windowBits;
- s->w_size = 1 << s->w_bits;
- s->w_mask = s->w_size - 1;
-
- s->hash_bits = memLevel + 7;
- s->hash_size = 1 << s->hash_bits;
- s->hash_mask = s->hash_size - 1;
- s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
-
- s->window = (Byte *) mem->window_memory;
- s->prev = (Pos *) mem->prev_memory;
- s->head = (Pos *) mem->head_memory;
-
- s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
-
- overlay = (ush *) mem->overlay_memory;
- s->pending_buf = (uch *) overlay;
- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
-
- s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
-
- s->level = level;
- s->strategy = strategy;
- s->method = (Byte)method;
-
- return zlib_deflateReset(strm);
-}
-
-/* ========================================================================= */
-int zlib_deflateReset(
- z_streamp strm
-)
-{
- deflate_state *s;
-
- if (strm == NULL || strm->state == NULL)
- return Z_STREAM_ERROR;
-
- strm->total_in = strm->total_out = 0;
- strm->msg = NULL;
- strm->data_type = Z_UNKNOWN;
-
- s = (deflate_state *)strm->state;
- s->pending = 0;
- s->pending_out = s->pending_buf;
-
- if (s->noheader < 0) {
- s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
- }
- s->status = s->noheader ? BUSY_STATE : INIT_STATE;
- strm->adler = 1;
- s->last_flush = Z_NO_FLUSH;
-
- zlib_tr_init(s);
- lm_init(s);
-
- return Z_OK;
-}
-
-/* =========================================================================
- * Put a short in the pending buffer. The 16-bit value is put in MSB order.
- * IN assertion: the stream state is correct and there is enough room in
- * pending_buf.
- */
-static void putShortMSB(
- deflate_state *s,
- uInt b
-)
-{
- put_byte(s, (Byte)(b >> 8));
- put_byte(s, (Byte)(b & 0xff));
-}
-
-/* =========================================================================
- * Flush as much pending output as possible. All deflate() output goes
- * through this function so some applications may wish to modify it
- * to avoid allocating a large strm->next_out buffer and copying into it.
- * (See also read_buf()).
- */
-static void flush_pending(
- z_streamp strm
-)
-{
- deflate_state *s = (deflate_state *) strm->state;
- unsigned len = s->pending;
-
- if (len > strm->avail_out) len = strm->avail_out;
- if (len == 0) return;
-
- if (strm->next_out != NULL) {
- memcpy(strm->next_out, s->pending_out, len);
- strm->next_out += len;
- }
- s->pending_out += len;
- strm->total_out += len;
- strm->avail_out -= len;
- s->pending -= len;
- if (s->pending == 0) {
- s->pending_out = s->pending_buf;
- }
-}
-
-/* ========================================================================= */
-int zlib_deflate(
- z_streamp strm,
- int flush
-)
-{
- int old_flush; /* value of flush param for previous deflate call */
- deflate_state *s;
-
- if (strm == NULL || strm->state == NULL ||
- flush > Z_FINISH || flush < 0) {
- return Z_STREAM_ERROR;
- }
- s = (deflate_state *) strm->state;
-
- if ((strm->next_in == NULL && strm->avail_in != 0) ||
- (s->status == FINISH_STATE && flush != Z_FINISH)) {
- return Z_STREAM_ERROR;
- }
- if (strm->avail_out == 0) return Z_BUF_ERROR;
-
- s->strm = strm; /* just in case */
- old_flush = s->last_flush;
- s->last_flush = flush;
-
- /* Write the zlib header */
- if (s->status == INIT_STATE) {
-
- uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
- uInt level_flags = (s->level-1) >> 1;
-
- if (level_flags > 3) level_flags = 3;
- header |= (level_flags << 6);
- if (s->strstart != 0) header |= PRESET_DICT;
- header += 31 - (header % 31);
-
- s->status = BUSY_STATE;
- putShortMSB(s, header);
-
- /* Save the adler32 of the preset dictionary: */
- if (s->strstart != 0) {
- putShortMSB(s, (uInt)(strm->adler >> 16));
- putShortMSB(s, (uInt)(strm->adler & 0xffff));
- }
- strm->adler = 1L;
- }
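-
- /* Editorial worked example (not part of the original source): with
- * windowBits = 15 and the default level 6, the header above works
- * out as
- *
- *   header      = (8 + (7 << 4)) << 8 = 0x7800
- *   level_flags = (6 - 1) >> 1        = 2
- *   header     |= 2 << 6                (now 0x7880)
- *   header     += 31 - (0x7880 % 31)    (now 0x789c)
- *
- * i.e. the familiar 0x78 0x9c bytes that open most zlib streams. The
- * last step makes the 16-bit header divisible by 31, which is the
- * check inflate performs on it.
- */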
-
- /* Flush as much pending output as possible */
- if (s->pending != 0) {
- flush_pending(strm);
- if (strm->avail_out == 0) {
- /* Since avail_out is 0, deflate will be called again with
- * more output space, but possibly with both pending and
- * avail_in equal to zero. There won't be anything to do,
- * but this is not an error situation so make sure we
- * return OK instead of BUF_ERROR at next call of deflate:
- */
- s->last_flush = -1;
- return Z_OK;
- }
-
- /* Make sure there is something to do and avoid duplicate consecutive
- * flushes. For repeated and useless calls with Z_FINISH, we keep
- * returning Z_STREAM_END instead of Z_BUF_ERROR.
- */
- } else if (strm->avail_in == 0 && flush <= old_flush &&
- flush != Z_FINISH) {
- return Z_BUF_ERROR;
- }
-
- /* User must not provide more input after the first FINISH: */
- if (s->status == FINISH_STATE && strm->avail_in != 0) {
- return Z_BUF_ERROR;
- }
-
- /* Start a new block or continue the current one.
- */
- if (strm->avail_in != 0 || s->lookahead != 0 ||
- (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
- block_state bstate;
-
- bstate = (*(configuration_table[s->level].func))(s, flush);
-
- if (bstate == finish_started || bstate == finish_done) {
- s->status = FINISH_STATE;
- }
- if (bstate == need_more || bstate == finish_started) {
- if (strm->avail_out == 0) {
- s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
- }
- return Z_OK;
- /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
- * of deflate should use the same flush parameter to make sure
- * that the flush is complete. So we don't have to output an
- * empty block here, this will be done at next call. This also
- * ensures that for a very small output buffer, we emit at most
- * one empty block.
- */
- }
- if (bstate == block_done) {
- if (flush == Z_PARTIAL_FLUSH) {
- zlib_tr_align(s);
- } else if (flush == Z_PACKET_FLUSH) {
- /* Output just the 3-bit `stored' block type value,
- without the length bytes. */
- zlib_tr_stored_type_only(s);
- } else { /* FULL_FLUSH or SYNC_FLUSH */
- zlib_tr_stored_block(s, (char*)0, 0L, 0);
- /* For a full flush, this empty block will be recognized
- * as a special marker by inflate_sync().
- */
- if (flush == Z_FULL_FLUSH) {
- CLEAR_HASH(s); /* forget history */
- }
- }
- flush_pending(strm);
- if (strm->avail_out == 0) {
- s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
- return Z_OK;
- }
- }
- }
- Assert(strm->avail_out > 0, "bug2");
-
- if (flush != Z_FINISH) return Z_OK;
- if (s->noheader) return Z_STREAM_END;
-
- /* Write the zlib trailer (adler32) */
- putShortMSB(s, (uInt)(strm->adler >> 16));
- putShortMSB(s, (uInt)(strm->adler & 0xffff));
- flush_pending(strm);
- /* If avail_out is zero, the application will call deflate again
- * to flush the rest.
- */
- s->noheader = -1; /* write the trailer only once! */
- return s->pending != 0 ? Z_OK : Z_STREAM_END;
-}
-
-/* ========================================================================= */
-int zlib_deflateEnd(
- z_streamp strm
-)
-{
- int status;
- deflate_state *s;
-
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- s = (deflate_state *) strm->state;
-
- status = s->status;
- if (status != INIT_STATE && status != BUSY_STATE &&
- status != FINISH_STATE) {
- return Z_STREAM_ERROR;
- }
-
- strm->state = NULL;
-
- return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
-}
-
-/* ===========================================================================
- * Read a new buffer from the current input stream, update the adler32
- * and total number of bytes read. All deflate() input goes through
- * this function so some applications may wish to modify it to avoid
- * allocating a large strm->next_in buffer and copying from it.
- * (See also flush_pending()).
- */
-static int read_buf(
- z_streamp strm,
- Byte *buf,
- unsigned size
-)
-{
- unsigned len = strm->avail_in;
-
- if (len > size) len = size;
- if (len == 0) return 0;
-
- strm->avail_in -= len;
-
- if (!((deflate_state *)(strm->state))->noheader) {
- strm->adler = zlib_adler32(strm->adler, strm->next_in, len);
- }
- memcpy(buf, strm->next_in, len);
- strm->next_in += len;
- strm->total_in += len;
-
- return (int)len;
-}
-
-/* ===========================================================================
- * Initialize the "longest match" routines for a new zlib stream
- */
-static void lm_init(
- deflate_state *s
-)
-{
- s->window_size = (ulg)2L*s->w_size;
-
- CLEAR_HASH(s);
-
- /* Set the default configuration parameters:
- */
- s->max_lazy_match = configuration_table[s->level].max_lazy;
- s->good_match = configuration_table[s->level].good_length;
- s->nice_match = configuration_table[s->level].nice_length;
- s->max_chain_length = configuration_table[s->level].max_chain;
-
- s->strstart = 0;
- s->block_start = 0L;
- s->lookahead = 0;
- s->match_length = s->prev_length = MIN_MATCH-1;
- s->match_available = 0;
- s->ins_h = 0;
-}
-
-/* ===========================================================================
- * Set match_start to the longest match starting at the given string and
- * return its length. Matches shorter than or equal to prev_length are discarded,
- * in which case the result is equal to prev_length and match_start is
- * garbage.
- * IN assertions: cur_match is the head of the hash chain for the current
- * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
- * OUT assertion: the match length is not greater than s->lookahead.
- */
-/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
- * match.S. The code will be functionally equivalent.
- */
-static uInt longest_match(
- deflate_state *s,
- IPos cur_match /* current match */
-)
-{
- unsigned chain_length = s->max_chain_length;/* max hash chain length */
- register Byte *scan = s->window + s->strstart; /* current string */
- register Byte *match; /* matched string */
- register int len; /* length of current match */
- int best_len = s->prev_length; /* best match length so far */
- int nice_match = s->nice_match; /* stop if match long enough */
- IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
- s->strstart - (IPos)MAX_DIST(s) : NIL;
- /* Stop when cur_match becomes <= limit. To simplify the code,
- * we prevent matches with the string of window index 0.
- */
- Pos *prev = s->prev;
- uInt wmask = s->w_mask;
-
-#ifdef UNALIGNED_OK
- /* Compare two bytes at a time. Note: this is not always beneficial.
- * Try with and without -DUNALIGNED_OK to check.
- */
- register Byte *strend = s->window + s->strstart + MAX_MATCH - 1;
- register ush scan_start = *(ush*)scan;
- register ush scan_end = *(ush*)(scan+best_len-1);
-#else
- register Byte *strend = s->window + s->strstart + MAX_MATCH;
- register Byte scan_end1 = scan[best_len-1];
- register Byte scan_end = scan[best_len];
-#endif
-
- /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
- * It is easy to get rid of this optimization if necessary.
- */
- Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
-
- /* Do not waste too much time if we already have a good match: */
- if (s->prev_length >= s->good_match) {
- chain_length >>= 2;
- }
- /* Do not look for matches beyond the end of the input. This is necessary
- * to make deflate deterministic.
- */
- if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
-
- Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
-
- do {
- Assert(cur_match < s->strstart, "no future");
- match = s->window + cur_match;
-
- /* Skip to next match if the match length cannot increase
- * or if the match length is less than 2:
- */
-#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
- /* This code assumes sizeof(unsigned short) == 2. Do not use
- * UNALIGNED_OK if your compiler uses a different size.
- */
- if (*(ush*)(match+best_len-1) != scan_end ||
- *(ush*)match != scan_start) continue;
-
- /* It is not necessary to compare scan[2] and match[2] since they are
- * always equal when the other bytes match, given that the hash keys
- * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
- * strstart+3, +5, ... up to strstart+257. We check for insufficient
- * lookahead only every 4th comparison; the 128th check will be made
- * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
- * necessary to put more guard bytes at the end of the window, or
- * to check more often for insufficient lookahead.
- */
- Assert(scan[2] == match[2], "scan[2]?");
- scan++, match++;
- do {
- } while (*(ush*)(scan+=2) == *(ush*)(match+=2) &&
- *(ush*)(scan+=2) == *(ush*)(match+=2) &&
- *(ush*)(scan+=2) == *(ush*)(match+=2) &&
- *(ush*)(scan+=2) == *(ush*)(match+=2) &&
- scan < strend);
- /* The funny "do {}" generates better code on most compilers */
-
- /* Here, scan <= window+strstart+257 */
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
- if (*scan == *match) scan++;
-
- len = (MAX_MATCH - 1) - (int)(strend-scan);
- scan = strend - (MAX_MATCH-1);
-
-#else /* UNALIGNED_OK */
-
- if (match[best_len] != scan_end ||
- match[best_len-1] != scan_end1 ||
- *match != *scan ||
- *++match != scan[1]) continue;
-
- /* The check at best_len-1 can be removed because it will be made
- * again later. (This heuristic is not always a win.)
- * It is not necessary to compare scan[2] and match[2] since they
- * are always equal when the other bytes match, given that
- * the hash keys are equal and that HASH_BITS >= 8.
- */
- scan += 2, match++;
- Assert(*scan == *match, "match[2]?");
-
- /* We check for insufficient lookahead only every 8th comparison;
- * the 256th check will be made at strstart+258.
- */
- do {
- } while (*++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- scan < strend);
-
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
-
- len = MAX_MATCH - (int)(strend - scan);
- scan = strend - MAX_MATCH;
-
-#endif /* UNALIGNED_OK */
-
- if (len > best_len) {
- s->match_start = cur_match;
- best_len = len;
- if (len >= nice_match) break;
-#ifdef UNALIGNED_OK
- scan_end = *(ush*)(scan+best_len-1);
-#else
- scan_end1 = scan[best_len-1];
- scan_end = scan[best_len];
-#endif
- }
- } while ((cur_match = prev[cur_match & wmask]) > limit
- && --chain_length != 0);
-
- if ((uInt)best_len <= s->lookahead) return best_len;
- return s->lookahead;
-}
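-
-/* Editorial note (not part of the original source): the configuration
- * table at the top of this file feeds straight into the cut-offs above.
- * At the default level 6 ({8, 16, 128, 128, deflate_slow}) a chain walk
- * visits at most max_chain = 128 candidates, drops to 128 >> 2 = 32
- * once a match of good_match = 8 bytes is already in hand, and stops as
- * soon as a match reaches nice_match = 128 bytes. The compression
- * levels are thus largely a knob on how much of each hash chain gets
- * walked.
- */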
-
-#ifdef DEBUG_ZLIB
-/* ===========================================================================
- * Check that the match at match_start is indeed a match.
- */
-static void check_match(
- deflate_state *s,
- IPos start,
- IPos match,
- int length
-)
-{
- /* check that the match is indeed a match */
- if (memcmp((char *)s->window + match,
- (char *)s->window + start, length) != EQUAL) {
- fprintf(stderr, " start %u, match %u, length %d\n",
- start, match, length);
- do {
- fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
- } while (--length != 0);
- z_error("invalid match");
- }
- if (z_verbose > 1) {
- fprintf(stderr,"\\[%d,%d]", start-match, length);
- do { putc(s->window[start++], stderr); } while (--length != 0);
- }
-}
-#else
-# define check_match(s, start, match, length)
-#endif
-
-/* ===========================================================================
- * Fill the window when the lookahead becomes insufficient.
- * Updates strstart and lookahead.
- *
- * IN assertion: lookahead < MIN_LOOKAHEAD
- * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
- * At least one byte has been read, or avail_in == 0; reads are
- * performed for at least two bytes (required for the zip translate_eol
- * option -- not supported here).
- */
-static void fill_window(
- deflate_state *s
-)
-{
- register unsigned n, m;
- register Pos *p;
- unsigned more; /* Amount of free space at the end of the window. */
- uInt wsize = s->w_size;
-
- do {
- more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
-
- /* Deal with !@#$% 64K limit: */
- if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
- more = wsize;
-
- } else if (more == (unsigned)(-1)) {
- /* Very unlikely, but possible on a 16-bit machine if strstart == 0
- * and lookahead == 1 (input done one byte at a time)
- */
- more--;
-
- /* If the window is almost full and there is insufficient lookahead,
- * move the upper half to the lower one to make room in the upper half.
- */
- } else if (s->strstart >= wsize+MAX_DIST(s)) {
-
- memcpy((char *)s->window, (char *)s->window+wsize,
- (unsigned)wsize);
- s->match_start -= wsize;
- s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
- s->block_start -= (long) wsize;
-
- /* Slide the hash table (could be avoided with 32 bit values
- at the expense of memory usage). We slide even when level == 0
- to keep the hash table consistent if we switch back to level > 0
- later. (Using level 0 permanently is not an optimal usage of
- zlib, so we don't care about this pathological case.)
- */
- n = s->hash_size;
- p = &s->head[n];
- do {
- m = *--p;
- *p = (Pos)(m >= wsize ? m-wsize : NIL);
- } while (--n);
-
- n = wsize;
- p = &s->prev[n];
- do {
- m = *--p;
- *p = (Pos)(m >= wsize ? m-wsize : NIL);
- /* If n is not on any hash chain, prev[n] is garbage but
- * its value will never be used.
- */
- } while (--n);
- more += wsize;
- }
- if (s->strm->avail_in == 0) return;
-
- /* If there was no sliding:
- * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
- * more == window_size - lookahead - strstart
- * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
- * => more >= window_size - 2*WSIZE + 2
- * In the BIG_MEM or MMAP case (not yet supported),
- * window_size == input_size + MIN_LOOKAHEAD &&
- * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
- * Otherwise, window_size == 2*WSIZE so more >= 2.
- * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
- */
- Assert(more >= 2, "more < 2");
-
- n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
- s->lookahead += n;
-
- /* Initialize the hash value now that we have some input: */
- if (s->lookahead >= MIN_MATCH) {
- s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
-#if MIN_MATCH != 3
- Call UPDATE_HASH() MIN_MATCH-3 more times
-#endif
- }
- /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
- * but this is not important since only literal bytes will be emitted.
- */
-
- } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
-}
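-
-/* Editorial sketch (not part of the original source): the slide above,
- * drawn for an unrealistically small w_size = 4. window_size is
- * 2*w_size, and positions are window indices:
- *
- *   before:  window = [ a b c d | e f g h ]   strstart = 8
- *   after :  window = [ e f g h | . . . . ]   strstart = 4
- *
- * Every recorded position (match_start, block_start, head[], prev[])
- * drops by w_size; entries that would go negative are clamped to NIL,
- * which silently forgets matches into the discarded half.
- */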
-
-/* ===========================================================================
- * Flush the current block, with given end-of-file flag.
- * IN assertion: strstart is set to the end of the current match.
- */
-#define FLUSH_BLOCK_ONLY(s, eof) { \
- zlib_tr_flush_block(s, (s->block_start >= 0L ? \
- (char *)&s->window[(unsigned)s->block_start] : \
- NULL), \
- (ulg)((long)s->strstart - s->block_start), \
- (eof)); \
- s->block_start = s->strstart; \
- flush_pending(s->strm); \
- Tracev((stderr,"[FLUSH]")); \
-}
-
-/* Same but force premature exit if necessary. */
-#define FLUSH_BLOCK(s, eof) { \
- FLUSH_BLOCK_ONLY(s, eof); \
- if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
-}
-
-/* ===========================================================================
- * Copy without compression as much as possible from the input stream, return
- * the current block state.
- * This function does not insert new strings in the dictionary since
- * incompressible data is probably not useful. This function is used
- * only for the level=0 compression option.
- * NOTE: this function should be optimized to avoid extra copying from
- * window to pending_buf.
- */
-static block_state deflate_stored(
- deflate_state *s,
- int flush
-)
-{
- /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
- * to pending_buf_size, and each stored block has a 5 byte header:
- */
- ulg max_block_size = 0xffff;
- ulg max_start;
-
- if (max_block_size > s->pending_buf_size - 5) {
- max_block_size = s->pending_buf_size - 5;
- }
-
- /* Copy as much as possible from input to output: */
- for (;;) {
- /* Fill the window as much as possible: */
- if (s->lookahead <= 1) {
-
- Assert(s->strstart < s->w_size+MAX_DIST(s) ||
- s->block_start >= (long)s->w_size, "slide too late");
-
- fill_window(s);
- if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
-
- if (s->lookahead == 0) break; /* flush the current block */
- }
- Assert(s->block_start >= 0L, "block gone");
-
- s->strstart += s->lookahead;
- s->lookahead = 0;
-
- /* Emit a stored block if pending_buf will be full: */
- max_start = s->block_start + max_block_size;
- if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
- /* strstart == 0 is possible after wraparound on a 16-bit machine */
- s->lookahead = (uInt)(s->strstart - max_start);
- s->strstart = (uInt)max_start;
- FLUSH_BLOCK(s, 0);
- }
- /* Flush if we may have to slide, otherwise block_start may become
- * negative and the data will be gone:
- */
- if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
- FLUSH_BLOCK(s, 0);
- }
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
-
-/* ===========================================================================
- * Compress as much as possible from the input stream, return the current
- * block state.
- * This function does not perform lazy evaluation of matches and inserts
- * new strings in the dictionary only for unmatched strings or for short
- * matches. It is used only for the fast compression options.
- */
-static block_state deflate_fast(
- deflate_state *s,
- int flush
-)
-{
- IPos hash_head = NIL; /* head of the hash chain */
- int bflush; /* set if current block must be flushed */
-
- for (;;) {
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the next match, plus MIN_MATCH bytes to insert the
- * string following the next match.
- */
- if (s->lookahead < MIN_LOOKAHEAD) {
- fill_window(s);
- if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
- return need_more;
- }
- if (s->lookahead == 0) break; /* flush the current block */
- }
-
- /* Insert the string window[strstart .. strstart+2] in the
- * dictionary, and set hash_head to the head of the hash chain:
- */
- if (s->lookahead >= MIN_MATCH) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
-
- /* Find the longest match, discarding those <= prev_length.
- * At this point we have always match_length < MIN_MATCH
- */
- if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
- /* To simplify the code, we prevent matches with the string
- * of window index 0 (in particular we have to avoid a match
- * of the string with itself at the start of the input file).
- */
- if (s->strategy != Z_HUFFMAN_ONLY) {
- s->match_length = longest_match (s, hash_head);
- }
- /* longest_match() sets match_start */
- }
- if (s->match_length >= MIN_MATCH) {
- check_match(s, s->strstart, s->match_start, s->match_length);
-
- bflush = zlib_tr_tally(s, s->strstart - s->match_start,
- s->match_length - MIN_MATCH);
-
- s->lookahead -= s->match_length;
-
- /* Insert new strings in the hash table only if the match length
- * is not too large. This saves time but degrades compression.
- */
- if (s->match_length <= s->max_insert_length &&
- s->lookahead >= MIN_MATCH) {
- s->match_length--; /* string at strstart already in hash table */
- do {
- s->strstart++;
- INSERT_STRING(s, s->strstart, hash_head);
- /* strstart never exceeds WSIZE-MAX_MATCH, so there are
- * always MIN_MATCH bytes ahead.
- */
- } while (--s->match_length != 0);
- s->strstart++;
- } else {
- s->strstart += s->match_length;
- s->match_length = 0;
- s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
-#if MIN_MATCH != 3
- Call UPDATE_HASH() MIN_MATCH-3 more times
-#endif
- /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
- * matter since it will be recomputed at next deflate call.
- */
- }
- } else {
- /* No match, output a literal byte */
- Tracevv((stderr,"%c", s->window[s->strstart]));
- bflush = zlib_tr_tally (s, 0, s->window[s->strstart]);
- s->lookahead--;
- s->strstart++;
- }
- if (bflush) FLUSH_BLOCK(s, 0);
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
-
-/* ===========================================================================
- * Same as above, but achieves better compression. We use a lazy
- * evaluation for matches: a match is finally adopted only if there is
- * no better match at the next window position.
- */
-static block_state deflate_slow(
- deflate_state *s,
- int flush
-)
-{
- IPos hash_head = NIL; /* head of hash chain */
- int bflush; /* set if current block must be flushed */
-
- /* Process the input block. */
- for (;;) {
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the next match, plus MIN_MATCH bytes to insert the
- * string following the next match.
- */
- if (s->lookahead < MIN_LOOKAHEAD) {
- fill_window(s);
- if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
- return need_more;
- }
- if (s->lookahead == 0) break; /* flush the current block */
- }
-
- /* Insert the string window[strstart .. strstart+2] in the
- * dictionary, and set hash_head to the head of the hash chain:
- */
- if (s->lookahead >= MIN_MATCH) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
-
- /* Find the longest match, discarding those <= prev_length.
- */
- s->prev_length = s->match_length, s->prev_match = s->match_start;
- s->match_length = MIN_MATCH-1;
-
- if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
- s->strstart - hash_head <= MAX_DIST(s)) {
- /* To simplify the code, we prevent matches with the string
- * of window index 0 (in particular we have to avoid a match
- * of the string with itself at the start of the input file).
- */
- if (s->strategy != Z_HUFFMAN_ONLY) {
- s->match_length = longest_match (s, hash_head);
- }
- /* longest_match() sets match_start */
-
- if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
- (s->match_length == MIN_MATCH &&
- s->strstart - s->match_start > TOO_FAR))) {
-
- /* If prev_match is also MIN_MATCH, match_start is garbage
- * but we will ignore the current match anyway.
- */
- s->match_length = MIN_MATCH-1;
- }
- }
- /* If there was a match at the previous step and the current
- * match is not better, output the previous match:
- */
- if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
- uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
- /* Do not insert strings in hash table beyond this. */
-
- check_match(s, s->strstart-1, s->prev_match, s->prev_length);
-
- bflush = zlib_tr_tally(s, s->strstart -1 - s->prev_match,
- s->prev_length - MIN_MATCH);
-
- /* Insert in hash table all strings up to the end of the match.
- * strstart-1 and strstart are already inserted. If there is not
- * enough lookahead, the last two strings are not inserted in
- * the hash table.
- */
- s->lookahead -= s->prev_length-1;
- s->prev_length -= 2;
- do {
- if (++s->strstart <= max_insert) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
- } while (--s->prev_length != 0);
- s->match_available = 0;
- s->match_length = MIN_MATCH-1;
- s->strstart++;
-
- if (bflush) FLUSH_BLOCK(s, 0);
-
- } else if (s->match_available) {
- /* If there was no match at the previous position, output a
- * single literal. If there was a match but the current match
- * is longer, truncate the previous match to a single literal.
- */
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- if (zlib_tr_tally (s, 0, s->window[s->strstart-1])) {
- FLUSH_BLOCK_ONLY(s, 0);
- }
- s->strstart++;
- s->lookahead--;
- if (s->strm->avail_out == 0) return need_more;
- } else {
- /* There is no previous match to compare with, wait for
- * the next step to decide.
- */
- s->match_available = 1;
- s->strstart++;
- s->lookahead--;
- }
- }
- Assert (flush != Z_NO_FLUSH, "no flush?");
- if (s->match_available) {
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- zlib_tr_tally (s, 0, s->window[s->strstart-1]);
- s->match_available = 0;
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
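-
-/* Editorial worked example (not part of the original source): suppose
- * the window already contains "abc" and "bcde" and the scan reaches
- * fresh input "abcde". At 'a' the best match is "abc" (length 3);
- * deflate_slow defers it, and one byte later finds "bcde" (length 4).
- * The deferred match is truncated to the literal 'a' followed by the
- * length-4 match: one literal bought one extra matched byte, and often
- * buys more. With the same input, deflate_fast() would have committed
- * to the length-3 match at 'a' and restarted after it.
- */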
-
-int zlib_deflate_workspacesize(int windowBits, int memLevel)
-{
- if (windowBits < 0) /* undocumented feature: suppress zlib header */
- windowBits = -windowBits;
-
- /* Since the return value is typically passed to vmalloc() unchecked... */
- BUG_ON(memLevel < 1 || memLevel > MAX_MEM_LEVEL || windowBits < 9 ||
- windowBits > 15);
-
- return sizeof(deflate_workspace)
- + zlib_deflate_window_memsize(windowBits)
- + zlib_deflate_prev_memsize(windowBits)
- + zlib_deflate_head_memsize(memLevel)
- + zlib_deflate_overlay_memsize(memLevel);
-}
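-
-/* Editorial sketch (not part of the original source): the calling
- * convention this API implies. The caller sizes and allocates the whole
- * workspace up front (typically with vmalloc(), as noted above), so
- * deflate itself never allocates:
- */
-#if 0 /* illustration only, never compiled */
- z_stream strm;
-
- strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
- MAX_MEM_LEVEL));
- if (!strm.workspace)
- return -ENOMEM;
- if (zlib_deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
- MAX_WBITS, MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY) != Z_OK)
- return -EINVAL;
-#endif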
diff --git a/linux/zlib_deflate/deftree.c b/linux/zlib_deflate/deftree.c
deleted file mode 100644
index 9b1756b..0000000
--- a/linux/zlib_deflate/deftree.c
+++ /dev/null
@@ -1,1113 +0,0 @@
-/* +++ trees.c */
-/* trees.c -- output deflated data using Huffman coding
- * Copyright (C) 1995-1996 Jean-loup Gailly
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/*
- * ALGORITHM
- *
- * The "deflation" process uses several Huffman trees. The more
- * common source values are represented by shorter bit sequences.
- *
- * Each code tree is stored in a compressed form which is itself
- * a Huffman encoding of the lengths of all the code strings (in
- * ascending order by source values). The actual code strings are
- * reconstructed from the lengths in the inflate process, as described
- * in the deflate specification.
- *
- * REFERENCES
- *
- * Deutsch, L.P., "'Deflate' Compressed Data Format Specification".
- * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
- *
- * Storer, James A.
- * Data Compression: Methods and Theory, pp. 49-50.
- * Computer Science Press, 1988. ISBN 0-7167-8156-5.
- *
- * Sedgewick, R.
- * Algorithms, p290.
- * Addison-Wesley, 1983. ISBN 0-201-06672-6.
- */
-
-/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
-
-/* #include "deflate.h" */
-
-#include <linux/zutil.h>
-#include <linux/bitrev.h>
-#include "defutil.h"
-
-#ifdef DEBUG_ZLIB
-# include <ctype.h>
-#endif
-
-/* ===========================================================================
- * Constants
- */
-
-#define MAX_BL_BITS 7
-/* Bit length codes must not exceed MAX_BL_BITS bits */
-
-#define END_BLOCK 256
-/* end of block literal code */
-
-#define REP_3_6 16
-/* repeat previous bit length 3-6 times (2 bits of repeat count) */
-
-#define REPZ_3_10 17
-/* repeat a zero length 3-10 times (3 bits of repeat count) */
-
-#define REPZ_11_138 18
-/* repeat a zero length 11-138 times (7 bits of repeat count) */
-
-static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
- = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
-
-static const int extra_dbits[D_CODES] /* extra bits for each distance code */
- = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
-
-static const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
- = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
-
-static const uch bl_order[BL_CODES]
- = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
-/* The lengths of the bit length codes are sent in order of decreasing
- * probability, to avoid transmitting the lengths for unused bit length codes.
- */
-
-#define Buf_size (8 * 2*sizeof(char))
-/* Number of bits used within bi_buf. (bi_buf might be implemented on
- * more than 16 bits on some systems.)
- */
-
-/* ===========================================================================
- * Local data. These are initialized only once.
- */
-
-static ct_data static_ltree[L_CODES+2];
-/* The static literal tree. Since the bit lengths are imposed, there is no
- * need for the L_CODES extra codes used during heap construction. However,
- * the codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init
- * below).
- */
-
-static ct_data static_dtree[D_CODES];
-/* The static distance tree. (Actually a trivial tree since all codes use
- * 5 bits.)
- */
-
-static uch dist_code[512];
-/* distance codes. The first 256 values correspond to the distances
- * 3 .. 258, the last 256 values correspond to the top 8 bits of
- * the 15 bit distances.
- */
-
-static uch length_code[MAX_MATCH-MIN_MATCH+1];
-/* length code for each normalized match length (0 == MIN_MATCH) */
-
-static int base_length[LENGTH_CODES];
-/* First normalized length for each code (0 = MIN_MATCH) */
-
-static int base_dist[D_CODES];
-/* First normalized distance for each code (0 = distance of 1) */
-
-struct static_tree_desc_s {
- const ct_data *static_tree; /* static tree or NULL */
- const int *extra_bits; /* extra bits for each code or NULL */
- int extra_base; /* base index for extra_bits */
- int elems; /* max number of elements in the tree */
- int max_length; /* max bit length for the codes */
-};
-
-static static_tree_desc static_l_desc =
-{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
-
-static static_tree_desc static_d_desc =
-{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
-
-static static_tree_desc static_bl_desc =
-{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
-
-/* ===========================================================================
- * Local (static) routines in this file.
- */
-
-static void tr_static_init (void);
-static void init_block (deflate_state *s);
-static void pqdownheap (deflate_state *s, ct_data *tree, int k);
-static void gen_bitlen (deflate_state *s, tree_desc *desc);
-static void gen_codes (ct_data *tree, int max_code, ush *bl_count);
-static void build_tree (deflate_state *s, tree_desc *desc);
-static void scan_tree (deflate_state *s, ct_data *tree, int max_code);
-static void send_tree (deflate_state *s, ct_data *tree, int max_code);
-static int build_bl_tree (deflate_state *s);
-static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
- int blcodes);
-static void compress_block (deflate_state *s, ct_data *ltree,
- ct_data *dtree);
-static void set_data_type (deflate_state *s);
-static void bi_windup (deflate_state *s);
-static void bi_flush (deflate_state *s);
-static void copy_block (deflate_state *s, char *buf, unsigned len,
- int header);
-
-#ifndef DEBUG_ZLIB
-# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
- /* Send a code of the given tree. c and tree must not have side effects */
-
-#else /* DEBUG_ZLIB */
-# define send_code(s, c, tree) \
- { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
- send_bits(s, tree[c].Code, tree[c].Len); }
-#endif
-
-#define d_code(dist) \
- ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
-/* Mapping from a distance to a distance code. dist is the distance - 1 and
- * must not have side effects. dist_code[256] and dist_code[257] are never
- * used.
- */
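-
-/* Editorial worked example (not part of the original source), with
- * dist == distance - 1 as above:
- *
- *   distance    1: dist =    0 (< 256),  dist_code[0]       = code  0
- *   distance    5: dist =    4 (< 256),  dist_code[4]       = code  4
- *   distance 1024: dist = 1023 (>= 256), dist_code[256 + 7] = code 19
- *
- * Code 4 carries 1 extra bit (distances 5..6) and code 19 carries 8
- * extra bits (distances 769..1024), matching extra_dbits[] above; the
- * split table keeps the lookup at 512 bytes instead of 32K entries.
- */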
-
-/* ===========================================================================
- * Send a value on a given number of bits.
- * IN assertion: length <= 16 and value fits in length bits.
- */
-#ifdef DEBUG_ZLIB
-static void send_bits (deflate_state *s, int value, int length);
-
-static void send_bits(
- deflate_state *s,
- int value, /* value to send */
- int length /* number of bits */
-)
-{
- Tracevv((stderr," l %2d v %4x ", length, value));
- Assert(length > 0 && length <= 15, "invalid length");
- s->bits_sent += (ulg)length;
-
- /* If not enough room in bi_buf, use (valid) bits from bi_buf and
- * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
- * unused bits in value.
- */
- if (s->bi_valid > (int)Buf_size - length) {
- s->bi_buf |= (value << s->bi_valid);
- put_short(s, s->bi_buf);
- s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
- s->bi_valid += length - Buf_size;
- } else {
- s->bi_buf |= value << s->bi_valid;
- s->bi_valid += length;
- }
-}
-#else /* !DEBUG_ZLIB */
-
-#define send_bits(s, value, length) \
-{ int len = length;\
- if (s->bi_valid > (int)Buf_size - len) {\
- int val = value;\
- s->bi_buf |= (val << s->bi_valid);\
- put_short(s, s->bi_buf);\
- s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
- s->bi_valid += len - Buf_size;\
- } else {\
- s->bi_buf |= (value) << s->bi_valid;\
- s->bi_valid += len;\
- }\
-}
-#endif /* DEBUG_ZLIB */
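-
-/* Editorial worked example (not part of the original source): starting
- * from an empty bit buffer, send_bits(s, 5, 3) then send_bits(s, 9, 4)
- * gives
- *
- *   bi_buf = 101b                             bi_valid = 3
- *   bi_buf = (1001b << 3) | 101b = 1001101b   bi_valid = 7
- *
- * Values are packed LSB-first; only when bi_valid would exceed Buf_size
- * is the low 16-bit word flushed with put_short() and the leftover high
- * bits carried over. gen_codes() below stores Huffman codes
- * pre-reversed (via bitrev32) precisely so that this LSB-first packing
- * emits their bits in the order inflate expects.
- */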
-
-/* ===========================================================================
- * Initialize the various 'constant' tables. In a multi-threaded environment,
- * this function may be called by two threads concurrently, but this is
- * harmless since both invocations do exactly the same thing.
- */
-static void tr_static_init(void)
-{
- static int static_init_done;
- int n; /* iterates over tree elements */
- int bits; /* bit counter */
- int length; /* length value */
- int code; /* code value */
- int dist; /* distance index */
- ush bl_count[MAX_BITS+1];
- /* number of codes at each bit length for an optimal tree */
-
- if (static_init_done) return;
-
- /* Initialize the mapping length (0..255) -> length code (0..28) */
- length = 0;
- for (code = 0; code < LENGTH_CODES-1; code++) {
- base_length[code] = length;
- for (n = 0; n < (1<<extra_lbits[code]); n++) {
- length_code[length++] = (uch)code;
- }
- }
- Assert (length == 256, "tr_static_init: length != 256");
- /* Note that the length 255 (match length 258) can be represented
- * in two different ways: code 284 + 5 bits or code 285, so we
- * overwrite length_code[255] to use the best encoding:
- */
- length_code[length-1] = (uch)code;
-
- /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
- dist = 0;
- for (code = 0 ; code < 16; code++) {
- base_dist[code] = dist;
- for (n = 0; n < (1<<extra_dbits[code]); n++) {
- dist_code[dist++] = (uch)code;
- }
- }
- Assert (dist == 256, "tr_static_init: dist != 256");
- dist >>= 7; /* from now on, all distances are divided by 128 */
- for ( ; code < D_CODES; code++) {
- base_dist[code] = dist << 7;
- for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
- dist_code[256 + dist++] = (uch)code;
- }
- }
- Assert (dist == 256, "tr_static_init: 256+dist != 512");
-
- /* Construct the codes of the static literal tree */
- for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
- n = 0;
- while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
- while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
- while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
- while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
- /* Codes 286 and 287 do not exist, but we must include them in the
- * tree construction to get a canonical Huffman tree (longest code
- * all ones)
- */
- gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
-
- /* The static distance tree is trivial: */
- for (n = 0; n < D_CODES; n++) {
- static_dtree[n].Len = 5;
- static_dtree[n].Code = bitrev32((u32)n) >> (32 - 5);
- }
- static_init_done = 1;
-}
-
-/* ===========================================================================
- * Initialize the tree data structures for a new zlib stream.
- */
-void zlib_tr_init(
- deflate_state *s
-)
-{
- tr_static_init();
-
- s->compressed_len = 0L;
-
- s->l_desc.dyn_tree = s->dyn_ltree;
- s->l_desc.stat_desc = &static_l_desc;
-
- s->d_desc.dyn_tree = s->dyn_dtree;
- s->d_desc.stat_desc = &static_d_desc;
-
- s->bl_desc.dyn_tree = s->bl_tree;
- s->bl_desc.stat_desc = &static_bl_desc;
-
- s->bi_buf = 0;
- s->bi_valid = 0;
- s->last_eob_len = 8; /* enough lookahead for inflate */
-#ifdef DEBUG_ZLIB
- s->bits_sent = 0L;
-#endif
-
- /* Initialize the first block of the first file: */
- init_block(s);
-}
-
-/* ===========================================================================
- * Initialize a new block.
- */
-static void init_block(
- deflate_state *s
-)
-{
- int n; /* iterates over tree elements */
-
- /* Initialize the trees. */
- for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
- for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
- for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
-
- s->dyn_ltree[END_BLOCK].Freq = 1;
- s->opt_len = s->static_len = 0L;
- s->last_lit = s->matches = 0;
-}
-
-#define SMALLEST 1
-/* Index within the heap array of least frequent node in the Huffman tree */
-
-
-/* ===========================================================================
- * Remove the smallest element from the heap and recreate the heap with
- * one less element. Updates heap and heap_len.
- */
-#define pqremove(s, tree, top) \
-{\
- top = s->heap[SMALLEST]; \
- s->heap[SMALLEST] = s->heap[s->heap_len--]; \
- pqdownheap(s, tree, SMALLEST); \
-}
-
-/* ===========================================================================
- * Compares two subtrees, using the tree depth as a tie breaker when
- * the subtrees have equal frequency. This minimizes the worst case length.
- */
-#define smaller(tree, n, m, depth) \
- (tree[n].Freq < tree[m].Freq || \
- (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
-
-/* ===========================================================================
- * Restore the heap property by moving down the tree starting at node k,
- * exchanging a node with the smallest of its two sons if necessary, stopping
- * when the heap property is re-established (each father smaller than its
- * two sons).
- */
-static void pqdownheap(
- deflate_state *s,
- ct_data *tree, /* the tree to restore */
- int k /* node to move down */
-)
-{
- int v = s->heap[k];
- int j = k << 1; /* left son of k */
- while (j <= s->heap_len) {
- /* Set j to the smallest of the two sons: */
- if (j < s->heap_len &&
- smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
- j++;
- }
- /* Exit if v is smaller than both sons */
- if (smaller(tree, v, s->heap[j], s->depth)) break;
-
- /* Exchange v with the smallest son */
- s->heap[k] = s->heap[j]; k = j;
-
- /* And continue down the tree, setting j to the left son of k */
- j <<= 1;
- }
- s->heap[k] = v;
-}
-
-/* ===========================================================================
- * Compute the optimal bit lengths for a tree and update the total bit length
- * for the current block.
- * IN assertion: the fields freq and dad are set, heap[heap_max] and
- * above are the tree nodes sorted by increasing frequency.
- * OUT assertions: the field len is set to the optimal bit length, the
- * array bl_count contains the frequencies for each bit length.
- * The length opt_len is updated; static_len is also updated if stree is
- * not null.
- */
-static void gen_bitlen(
- deflate_state *s,
- tree_desc *desc /* the tree descriptor */
-)
-{
- ct_data *tree = desc->dyn_tree;
- int max_code = desc->max_code;
- const ct_data *stree = desc->stat_desc->static_tree;
- const int *extra = desc->stat_desc->extra_bits;
- int base = desc->stat_desc->extra_base;
- int max_length = desc->stat_desc->max_length;
- int h; /* heap index */
- int n, m; /* iterate over the tree elements */
- int bits; /* bit length */
- int xbits; /* extra bits */
- ush f; /* frequency */
- int overflow = 0; /* number of elements with bit length too large */
-
- for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
-
- /* In a first pass, compute the optimal bit lengths (which may
- * overflow in the case of the bit length tree).
- */
- tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
-
- for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
- n = s->heap[h];
- bits = tree[tree[n].Dad].Len + 1;
- if (bits > max_length) bits = max_length, overflow++;
- tree[n].Len = (ush)bits;
- /* We overwrite tree[n].Dad which is no longer needed */
-
- if (n > max_code) continue; /* not a leaf node */
-
- s->bl_count[bits]++;
- xbits = 0;
- if (n >= base) xbits = extra[n-base];
- f = tree[n].Freq;
- s->opt_len += (ulg)f * (bits + xbits);
- if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
- }
- if (overflow == 0) return;
-
- Trace((stderr,"\nbit length overflow\n"));
- /* This happens for example on obj2 and pic of the Calgary corpus */
-
- /* Find the first bit length which could increase: */
- do {
- bits = max_length-1;
- while (s->bl_count[bits] == 0) bits--;
- s->bl_count[bits]--; /* move one leaf down the tree */
- s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
- s->bl_count[max_length]--;
- /* The brother of the overflow item also moves one step up,
- * but this does not affect bl_count[max_length]
- */
- overflow -= 2;
- } while (overflow > 0);
-
- /* Now recompute all bit lengths, scanning in increasing frequency.
- * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
- * lengths instead of fixing only the wrong ones. This idea is taken
- * from 'ar' written by Haruhiko Okumura.)
- */
- for (bits = max_length; bits != 0; bits--) {
- n = s->bl_count[bits];
- while (n != 0) {
- m = s->heap[--h];
- if (m > max_code) continue;
- if (tree[m].Len != (unsigned) bits) {
- Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
- s->opt_len += ((long)bits - (long)tree[m].Len)
- *(long)tree[m].Freq;
- tree[m].Len = (ush)bits;
- }
- n--;
- }
- }
-}
-
-/* ===========================================================================
- * Generate the codes for a given tree and bit counts (which need not be
- * optimal).
- * IN assertion: the array bl_count contains the bit length statistics for
- * the given tree and the field len is set for all tree elements.
- * OUT assertion: the field code is set for all tree elements of non
- * zero code length.
- */
-static void gen_codes(
- ct_data *tree, /* the tree to decorate */
- int max_code, /* largest code with non zero frequency */
- ush *bl_count /* number of codes at each bit length */
-)
-{
- ush next_code[MAX_BITS+1]; /* next code value for each bit length */
- ush code = 0; /* running code value */
- int bits; /* bit index */
- int n; /* code index */
-
- /* The distribution counts are first used to generate the code values
- * without bit reversal.
- */
- for (bits = 1; bits <= MAX_BITS; bits++) {
- next_code[bits] = code = (code + bl_count[bits-1]) << 1;
- }
- /* Check that the bit counts in bl_count are consistent. The last code
- * must be all ones.
- */
- Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
- "inconsistent bit counts");
- Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
-
- for (n = 0; n <= max_code; n++) {
- int len = tree[n].Len;
- if (len == 0) continue;
- /* Now reverse the bits */
- tree[n].Code = bitrev32((u32)(next_code[len]++)) >> (32 - len);
-
- Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
- n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
- }
-}
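-
-/* Editorial worked example (not part of the original source): four
- * symbols A,B,C,D with assigned lengths 2,1,3,3 give
- * bl_count = {0,1,1,2}, and the first loop above produces
- *
- *   next_code[1] = (0 + 0) << 1 = 0
- *   next_code[2] = (0 + 1) << 1 = 2
- *   next_code[3] = (2 + 1) << 1 = 6
- *
- * so B = 0b, A = 10b, C = 110b, D = 111b (before bit reversal). The
- * last code is all ones, which is exactly what the Assert verifies.
- */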
-
-/* ===========================================================================
- * Construct one Huffman tree and assign the code bit strings and lengths.
- * Update the total bit length for the current block.
- * IN assertion: the field freq is set for all tree elements.
- * OUT assertions: the fields len and code are set to the optimal bit length
- * and corresponding code. The length opt_len is updated; static_len is
- * also updated if stree is not null. The field max_code is set.
- */
-static void build_tree(
- deflate_state *s,
- tree_desc *desc /* the tree descriptor */
-)
-{
- ct_data *tree = desc->dyn_tree;
- const ct_data *stree = desc->stat_desc->static_tree;
- int elems = desc->stat_desc->elems;
- int n, m; /* iterate over heap elements */
- int max_code = -1; /* largest code with non zero frequency */
- int node; /* new node being created */
-
- /* Construct the initial heap, with least frequent element in
- * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
- * heap[0] is not used.
- */
- s->heap_len = 0, s->heap_max = HEAP_SIZE;
-
- for (n = 0; n < elems; n++) {
- if (tree[n].Freq != 0) {
- s->heap[++(s->heap_len)] = max_code = n;
- s->depth[n] = 0;
- } else {
- tree[n].Len = 0;
- }
- }
-
- /* The pkzip format requires that at least one distance code exists,
- * and that at least one bit should be sent even if there is only one
- * possible code. So to avoid special checks later on we force at least
- * two codes of non zero frequency.
- */
- while (s->heap_len < 2) {
- node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
- tree[node].Freq = 1;
- s->depth[node] = 0;
- s->opt_len--; if (stree) s->static_len -= stree[node].Len;
- /* node is 0 or 1 so it does not have extra bits */
- }
- desc->max_code = max_code;
-
- /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
- * establish sub-heaps of increasing lengths:
- */
- for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
-
- /* Construct the Huffman tree by repeatedly combining the least two
- * frequent nodes.
- */
- node = elems; /* next internal node of the tree */
- do {
- pqremove(s, tree, n); /* n = node of least frequency */
- m = s->heap[SMALLEST]; /* m = node of next least frequency */
-
- s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
- s->heap[--(s->heap_max)] = m;
-
- /* Create a new node father of n and m */
- tree[node].Freq = tree[n].Freq + tree[m].Freq;
- s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1);
- tree[n].Dad = tree[m].Dad = (ush)node;
-#ifdef DUMP_BL_TREE
- if (tree == s->bl_tree) {
- fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
- node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
- }
-#endif
- /* and insert the new node in the heap */
- s->heap[SMALLEST] = node++;
- pqdownheap(s, tree, SMALLEST);
-
- } while (s->heap_len >= 2);
-
- s->heap[--(s->heap_max)] = s->heap[SMALLEST];
-
- /* At this point, the fields freq and dad are set. We can now
- * generate the bit lengths.
- */
- gen_bitlen(s, (tree_desc *)desc);
-
- /* The field len is now set, we can generate the bit codes */
- gen_codes ((ct_data *)tree, max_code, s->bl_count);
-}
-
-/* ===========================================================================
- * Scan a literal or distance tree to determine the frequencies of the codes
- * in the bit length tree.
- */
-static void scan_tree(
- deflate_state *s,
- ct_data *tree, /* the tree to be scanned */
- int max_code /* and its largest code of non zero frequency */
-)
-{
- int n; /* iterates over all tree elements */
- int prevlen = -1; /* last emitted length */
- int curlen; /* length of current code */
- int nextlen = tree[0].Len; /* length of next code */
- int count = 0; /* repeat count of the current code */
- int max_count = 7; /* max repeat count */
- int min_count = 4; /* min repeat count */
-
- if (nextlen == 0) max_count = 138, min_count = 3;
- tree[max_code+1].Len = (ush)0xffff; /* guard */
-
- for (n = 0; n <= max_code; n++) {
- curlen = nextlen; nextlen = tree[n+1].Len;
- if (++count < max_count && curlen == nextlen) {
- continue;
- } else if (count < min_count) {
- s->bl_tree[curlen].Freq += count;
- } else if (curlen != 0) {
- if (curlen != prevlen) s->bl_tree[curlen].Freq++;
- s->bl_tree[REP_3_6].Freq++;
- } else if (count <= 10) {
- s->bl_tree[REPZ_3_10].Freq++;
- } else {
- s->bl_tree[REPZ_11_138].Freq++;
- }
- count = 0; prevlen = curlen;
- if (nextlen == 0) {
- max_count = 138, min_count = 3;
- } else if (curlen == nextlen) {
- max_count = 6, min_count = 3;
- } else {
- max_count = 7, min_count = 4;
- }
- }
-}
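-
-/* Editorial worked example (not part of the original source): the
- * length sequence  5 5 5 5 5 0 0 0 0 0 0 0 4  is counted above as
- *
- *   5 x5  ->  one literal 5, then REP_3_6 (repeat the previous
- *             length 4 more times)
- *   0 x7  ->  REPZ_3_10 (seven zeros in one code)
- *   4 x1  ->  one literal 4
- *
- * send_tree() below walks the tree with the same run-length rules, so
- * the frequencies counted here match the codes it actually emits.
- */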
-
-/* ===========================================================================
- * Send a literal or distance tree in compressed form, using the codes in
- * bl_tree.
- */
-static void send_tree(
- deflate_state *s,
- ct_data *tree, /* the tree to be scanned */
- int max_code /* and its largest code of non zero frequency */
-)
-{
- int n; /* iterates over all tree elements */
- int prevlen = -1; /* last emitted length */
- int curlen; /* length of current code */
- int nextlen = tree[0].Len; /* length of next code */
- int count = 0; /* repeat count of the current code */
- int max_count = 7; /* max repeat count */
- int min_count = 4; /* min repeat count */
-
- /* tree[max_code+1].Len = -1; */ /* guard already set */
- if (nextlen == 0) max_count = 138, min_count = 3;
-
- for (n = 0; n <= max_code; n++) {
- curlen = nextlen; nextlen = tree[n+1].Len;
- if (++count < max_count && curlen == nextlen) {
- continue;
- } else if (count < min_count) {
- do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
-
- } else if (curlen != 0) {
- if (curlen != prevlen) {
- send_code(s, curlen, s->bl_tree); count--;
- }
- Assert(count >= 3 && count <= 6, " 3_6?");
- send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
-
- } else if (count <= 10) {
- send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
-
- } else {
- send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
- }
- count = 0; prevlen = curlen;
- if (nextlen == 0) {
- max_count = 138, min_count = 3;
- } else if (curlen == nextlen) {
- max_count = 6, min_count = 3;
- } else {
- max_count = 7, min_count = 4;
- }
- }
-}
-
-/* ===========================================================================
- * Construct the Huffman tree for the bit lengths and return the index in
- * bl_order of the last bit length code to send.
- */
-static int build_bl_tree(
- deflate_state *s
-)
-{
- int max_blindex; /* index of last bit length code of non zero freq */
-
- /* Determine the bit length frequencies for literal and distance trees */
- scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
- scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
-
- /* Build the bit length tree: */
- build_tree(s, (tree_desc *)(&(s->bl_desc)));
- /* opt_len now includes the length of the tree representations, except
- * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
- */
-
- /* Determine the number of bit length codes to send. The pkzip format
- * requires that at least 4 bit length codes be sent. (appnote.txt says
- * 3 but the actual value used is 4.)
- */
- for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
- if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
- }
- /* Update opt_len to include the bit length tree and counts */
- s->opt_len += 3*(max_blindex+1) + 5+5+4;
- Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
- s->opt_len, s->static_len));
-
- return max_blindex;
-}
-
-/* ===========================================================================
- * Send the header for a block using dynamic Huffman trees: the counts, the
- * lengths of the bit length codes, the literal tree and the distance tree.
- * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
- */
-static void send_all_trees(
- deflate_state *s,
- int lcodes, /* number of literal/length codes */
- int dcodes, /* number of distance codes */
- int blcodes /* number of bit length codes */
-)
-{
- int rank; /* index in bl_order */
-
- Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
- Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
- "too many codes");
- Tracev((stderr, "\nbl counts: "));
- send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
- send_bits(s, dcodes-1, 5);
- send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
- for (rank = 0; rank < blcodes; rank++) {
- Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
- send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
- }
- Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
-
- send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
- Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
-
- send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
- Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
-}
-
-/* ===========================================================================
- * Send a stored block
- */
-void zlib_tr_stored_block(
- deflate_state *s,
- char *buf, /* input block */
- ulg stored_len, /* length of input block */
- int eof /* true if this is the last block for a file */
-)
-{
- send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
- s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
- s->compressed_len += (stored_len + 4) << 3;
-
- copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
-}
-
-/* Send just the `stored block' type code without any length bytes or data.
- */
-void zlib_tr_stored_type_only(
- deflate_state *s
-)
-{
- send_bits(s, (STORED_BLOCK << 1), 3);
- bi_windup(s);
- s->compressed_len = (s->compressed_len + 3) & ~7L;
-}
-
-
-/* ===========================================================================
- * Send one empty static block to give enough lookahead for inflate.
- * This takes 10 bits, of which 7 may remain in the bit buffer.
- * The current inflate code requires 9 bits of lookahead. If the
- * last two codes for the previous block (real code plus EOB) were coded
- * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
- * the last real code. In this case we send two empty static blocks instead
- * of one. (There are no problems if the previous block is stored or fixed.)
- * To simplify the code, we assume the worst case of last real code encoded
- * on one bit only.
- */
-void zlib_tr_align(
- deflate_state *s
-)
-{
- send_bits(s, STATIC_TREES<<1, 3);
- send_code(s, END_BLOCK, static_ltree);
- s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
- bi_flush(s);
- /* Of the 10 bits for the empty block, we have already sent
- * (10 - bi_valid) bits. The lookahead for the last real code (before
- * the EOB of the previous block) was thus at least one plus the length
- * of the EOB plus what we have just sent of the empty static block.
- */
- if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
- send_bits(s, STATIC_TREES<<1, 3);
- send_code(s, END_BLOCK, static_ltree);
- s->compressed_len += 10L;
- bi_flush(s);
- }
- s->last_eob_len = 7;
-}
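-/* A worked instance of the check above, with illustrative numbers: in the
- * worst case last_eob_len == 1, and if bi_valid == 7 after bi_flush(), the
- * guaranteed lookahead is 1 + 1 + (10 - 7) = 5 bits, less than the 9 bits
- * inflate needs, so the second empty static block is emitted.
- */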
-
-/* ===========================================================================
- * Determine the best encoding for the current block: dynamic trees, static
- * trees or store, and output the encoded block to the zip file. This function
- * returns the total compressed length for the file so far.
- */
-ulg zlib_tr_flush_block(
- deflate_state *s,
- char *buf, /* input block, or NULL if too old */
- ulg stored_len, /* length of input block */
- int eof /* true if this is the last block for a file */
-)
-{
- ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
- int max_blindex = 0; /* index of last bit length code of non zero freq */
-
- /* Build the Huffman trees unless a stored block is forced */
- if (s->level > 0) {
-
- /* Check if the file is ASCII or binary */
- if (s->data_type == Z_UNKNOWN) set_data_type(s);
-
- /* Construct the literal and distance trees */
- build_tree(s, (tree_desc *)(&(s->l_desc)));
- Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
- s->static_len));
-
- build_tree(s, (tree_desc *)(&(s->d_desc)));
- Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
- s->static_len));
- /* At this point, opt_len and static_len are the total bit lengths of
- * the compressed block data, excluding the tree representations.
- */
-
- /* Build the bit length tree for the above two trees, and get the index
- * in bl_order of the last bit length code to send.
- */
- max_blindex = build_bl_tree(s);
-
- /* Determine the best encoding. First compute the block lengths in bytes. */
- opt_lenb = (s->opt_len+3+7)>>3;
- static_lenb = (s->static_len+3+7)>>3;
-
- Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
- opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
- s->last_lit));
-
- if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
-
- } else {
- Assert(buf != (char*)0, "lost buf");
- opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
- }
-
- /* If compression failed and this is the first and last block,
- * and if the .zip file is seekable (to rewrite the local header),
- * the whole file is transformed into a stored file:
- */
-#ifdef STORED_FILE_OK
-# ifdef FORCE_STORED_FILE
- if (eof && s->compressed_len == 0L) { /* force stored file */
-# else
- if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
-# endif
- /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
- if (buf == (char*)0) error ("block vanished");
-
- copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
- s->compressed_len = stored_len << 3;
- s->method = STORED;
- } else
-#endif /* STORED_FILE_OK */
-
-#ifdef FORCE_STORED
- if (buf != (char*)0) { /* force stored block */
-#else
- if (stored_len+4 <= opt_lenb && buf != (char*)0) {
- /* 4: two words for the lengths */
-#endif
- /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
- * Otherwise we can't have processed more than WSIZE input bytes since
- * the last block flush, because compression would have been
- * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
- * transform a block into a stored block.
- */
- zlib_tr_stored_block(s, buf, stored_len, eof);
-
-#ifdef FORCE_STATIC
- } else if (static_lenb >= 0) { /* force static trees */
-#else
- } else if (static_lenb == opt_lenb) {
-#endif
- send_bits(s, (STATIC_TREES<<1)+eof, 3);
- compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
- s->compressed_len += 3 + s->static_len;
- } else {
- send_bits(s, (DYN_TREES<<1)+eof, 3);
- send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
- max_blindex+1);
- compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
- s->compressed_len += 3 + s->opt_len;
- }
- Assert (s->compressed_len == s->bits_sent, "bad compressed size");
- init_block(s);
-
- if (eof) {
- bi_windup(s);
- s->compressed_len += 7; /* align on byte boundary */
- }
- Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
- s->compressed_len-7*eof));
-
- return s->compressed_len >> 3;
-}
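-/* A concrete (illustrative) instance of the rounding above: a block whose
- * optimal encoding costs s->opt_len == 1000 bits is charged
- * (1000 + 3 + 7) >> 3 == 126 bytes: 3 bits for the block type, plus up to
- * 7 bits of padding to the next whole byte.
- */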
-
-/* ===========================================================================
- * Save the match info and tally the frequency counts. Return true if
- * the current block must be flushed.
- */
-int zlib_tr_tally(
- deflate_state *s,
- unsigned dist, /* distance of matched string */
- unsigned lc /* match length-MIN_MATCH or unmatched char (if dist==0) */
-)
-{
- s->d_buf[s->last_lit] = (ush)dist;
- s->l_buf[s->last_lit++] = (uch)lc;
- if (dist == 0) {
- /* lc is the unmatched char */
- s->dyn_ltree[lc].Freq++;
- } else {
- s->matches++;
- /* Here, lc is the match length - MIN_MATCH */
- dist--; /* dist = match distance - 1 */
- Assert((ush)dist < (ush)MAX_DIST(s) &&
- (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
- (ush)d_code(dist) < (ush)D_CODES, "zlib_tr_tally: bad match");
-
- s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
- s->dyn_dtree[d_code(dist)].Freq++;
- }
-
- /* Try to guess if it is profitable to stop the current block here */
- if ((s->last_lit & 0xfff) == 0 && s->level > 2) {
- /* Compute an upper bound for the compressed length */
- ulg out_length = (ulg)s->last_lit*8L;
- ulg in_length = (ulg)((long)s->strstart - s->block_start);
- int dcode;
- for (dcode = 0; dcode < D_CODES; dcode++) {
- out_length += (ulg)s->dyn_dtree[dcode].Freq *
- (5L+extra_dbits[dcode]);
- }
- out_length >>= 3;
- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
- s->last_lit, in_length, out_length,
- 100L - out_length*100L/in_length));
- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
- }
- return (s->last_lit == s->lit_bufsize-1);
- /* We avoid equality with lit_bufsize because of wraparound at 64K
- * on 16 bit machines and because stored blocks are restricted to
- * 64K-1 bytes.
- */
-}
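-/* An illustrative caller sketch (helper name hypothetical): the deflate loop
- * records each literal as a zero-distance tally and each match with its
- * distance and biased length, flushing when zlib_tr_tally() returns nonzero.
- */
-static inline void tally_example(deflate_state *s, unsigned dist,
-                                 unsigned len, int c)
-{
-    if (zlib_tr_tally(s, 0, (unsigned)c))         /* literal byte c */
-        ;                                         /* caller flushes block here */
-    if (zlib_tr_tally(s, dist, len - MIN_MATCH))  /* match (dist, len) */
-        ;                                         /* caller flushes block here */
-}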
-
-/* ===========================================================================
- * Send the block data compressed using the given Huffman trees
- */
-static void compress_block(
- deflate_state *s,
- ct_data *ltree, /* literal tree */
- ct_data *dtree /* distance tree */
-)
-{
- unsigned dist; /* distance of matched string */
- int lc; /* match length or unmatched char (if dist == 0) */
- unsigned lx = 0; /* running index in l_buf */
- unsigned code; /* the code to send */
- int extra; /* number of extra bits to send */
-
- if (s->last_lit != 0) do {
- dist = s->d_buf[lx];
- lc = s->l_buf[lx++];
- if (dist == 0) {
- send_code(s, lc, ltree); /* send a literal byte */
- Tracecv(isgraph(lc), (stderr," '%c' ", lc));
- } else {
- /* Here, lc is the match length - MIN_MATCH */
- code = length_code[lc];
- send_code(s, code+LITERALS+1, ltree); /* send the length code */
- extra = extra_lbits[code];
- if (extra != 0) {
- lc -= base_length[code];
- send_bits(s, lc, extra); /* send the extra length bits */
- }
- dist--; /* dist is now the match distance - 1 */
- code = d_code(dist);
- Assert (code < D_CODES, "bad d_code");
-
- send_code(s, code, dtree); /* send the distance code */
- extra = extra_dbits[code];
- if (extra != 0) {
- dist -= base_dist[code];
- send_bits(s, dist, extra); /* send the extra distance bits */
- }
- } /* literal or match pair ? */
-
- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
- Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
-
- } while (lx < s->last_lit);
-
- send_code(s, END_BLOCK, ltree);
- s->last_eob_len = ltree[END_BLOCK].Len;
-}
-
-/* ===========================================================================
- * Set the data type to ASCII or BINARY, using a crude approximation:
- * binary if more than 20% of the bytes are <= 6 or >= 128, ASCII otherwise
- * (the test below, bin_freq > ascii_freq/4, says the same thing, since
- * bin/(bin+ascii) > 1/5 exactly when bin > ascii/4).
- * IN assertion: the fields freq of dyn_ltree are set and the total of all
- * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
- */
-static void set_data_type(
- deflate_state *s
-)
-{
- int n = 0;
- unsigned ascii_freq = 0;
- unsigned bin_freq = 0;
- while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
- while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
- while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
- s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
-}
-
-/* ===========================================================================
- * Copy a stored block, storing first the length and its
- * one's complement if requested.
- */
-static void copy_block(
- deflate_state *s,
- char *buf, /* the input data */
- unsigned len, /* its length */
- int header /* true if block header must be written */
-)
-{
- bi_windup(s); /* align on byte boundary */
- s->last_eob_len = 8; /* enough lookahead for inflate */
-
- if (header) {
- put_short(s, (ush)len);
- put_short(s, (ush)~len);
-#ifdef DEBUG_ZLIB
- s->bits_sent += 2*16;
-#endif
- }
-#ifdef DEBUG_ZLIB
- s->bits_sent += (ulg)len<<3;
-#endif
- /* bundle up the put_byte(s, *buf++) calls */
- memcpy(&s->pending_buf[s->pending], buf, len);
- s->pending += len;
-}
-
diff --git a/linux/zlib_deflate/defutil.h b/linux/zlib_deflate/defutil.h
deleted file mode 100644
index a8c3708..0000000
--- a/linux/zlib_deflate/defutil.h
+++ /dev/null
@@ -1,327 +0,0 @@
-
-
-
-#define Assert(err, str)
-#define Trace(dummy)
-#define Tracev(dummy)
-#define Tracecv(err, dummy)
-#define Tracevv(dummy)
-
-
-
-#define LENGTH_CODES 29
-/* number of length codes, not counting the special END_BLOCK code */
-
-#define LITERALS 256
-/* number of literal bytes 0..255 */
-
-#define L_CODES (LITERALS+1+LENGTH_CODES)
-/* number of Literal or Length codes, including the END_BLOCK code */
-
-#define D_CODES 30
-/* number of distance codes */
-
-#define BL_CODES 19
-/* number of codes used to transfer the bit lengths */
-
-#define HEAP_SIZE (2*L_CODES+1)
-/* maximum heap size */
-
-#define MAX_BITS 15
-/* All codes must not exceed MAX_BITS bits */
-
-#define INIT_STATE 42
-#define BUSY_STATE 113
-#define FINISH_STATE 666
-/* Stream status */
-
-
-/* Data structure describing a single value and its code string. */
-typedef struct ct_data_s {
- union {
- ush freq; /* frequency count */
- ush code; /* bit string */
- } fc;
- union {
- ush dad; /* father node in Huffman tree */
- ush len; /* length of bit string */
- } dl;
-} ct_data;
-
-#define Freq fc.freq
-#define Code fc.code
-#define Dad dl.dad
-#define Len dl.len
-
-typedef struct static_tree_desc_s static_tree_desc;
-
-typedef struct tree_desc_s {
- ct_data *dyn_tree; /* the dynamic tree */
- int max_code; /* largest code with non zero frequency */
- static_tree_desc *stat_desc; /* the corresponding static tree */
-} tree_desc;
-
-typedef ush Pos;
-typedef unsigned IPos;
-
-/* A Pos is an index in the character window. We use short instead of int to
- * save space in the various tables. IPos is used only for parameter passing.
- */
-
-typedef struct deflate_state {
- z_streamp strm; /* pointer back to this zlib stream */
- int status; /* as the name implies */
- Byte *pending_buf; /* output still pending */
- ulg pending_buf_size; /* size of pending_buf */
- Byte *pending_out; /* next pending byte to output to the stream */
- int pending; /* nb of bytes in the pending buffer */
- int noheader; /* suppress zlib header and adler32 */
- Byte data_type; /* UNKNOWN, BINARY or ASCII */
- Byte method; /* STORED (for zip only) or DEFLATED */
- int last_flush; /* value of flush param for previous deflate call */
-
- /* used by deflate.c: */
-
- uInt w_size; /* LZ77 window size (32K by default) */
- uInt w_bits; /* log2(w_size) (8..16) */
- uInt w_mask; /* w_size - 1 */
-
- Byte *window;
- /* Sliding window. Input bytes are read into the second half of the window,
- * and move to the first half later to keep a dictionary of at least wSize
- * bytes. With this organization, matches are limited to a distance of
- * wSize-MAX_MATCH bytes, but this ensures that IO is always
- * performed with a length multiple of the block size. Also, it limits
- * the window size to 64K, which is quite useful on MSDOS.
- * To do: use the user input buffer as sliding window.
- */
-
- ulg window_size;
- /* Actual size of window: 2*wSize, except when the user input buffer
- * is directly used as sliding window.
- */
-
- Pos *prev;
- /* Link to older string with same hash index. To limit the size of this
- * array to 64K, this link is maintained only for the last 32K strings.
- * An index in this array is thus a window index modulo 32K.
- */
-
- Pos *head; /* Heads of the hash chains or NIL. */
-
- uInt ins_h; /* hash index of string to be inserted */
- uInt hash_size; /* number of elements in hash table */
- uInt hash_bits; /* log2(hash_size) */
- uInt hash_mask; /* hash_size-1 */
-
- uInt hash_shift;
- /* Number of bits by which ins_h must be shifted at each input
- * step. It must be such that after MIN_MATCH steps, the oldest
- * byte no longer takes part in the hash key, that is:
- * hash_shift * MIN_MATCH >= hash_bits
- */
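-    /* For instance, at the illustrative defaults MIN_MATCH == 3 and
-     * hash_bits == 15 (memLevel 8), hash_shift is 5, and 5 * 3 >= 15 holds.
-     */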
-
- long block_start;
- /* Window position at the beginning of the current output block. Gets
- * negative when the window is moved backwards.
- */
-
- uInt match_length; /* length of best match */
- IPos prev_match; /* previous match */
- int match_available; /* set if previous match exists */
- uInt strstart; /* start of string to insert */
- uInt match_start; /* start of matching string */
- uInt lookahead; /* number of valid bytes ahead in window */
-
- uInt prev_length;
- /* Length of the best match at previous step. Matches not greater than this
- * are discarded. This is used in the lazy match evaluation.
- */
-
- uInt max_chain_length;
- /* To speed up deflation, hash chains are never searched beyond this
- * length. A higher limit improves compression ratio but degrades the
- * speed.
- */
-
- uInt max_lazy_match;
- /* Attempt to find a better match only when the current match is strictly
- * smaller than this value. This mechanism is used only for compression
- * levels >= 4.
- */
-# define max_insert_length max_lazy_match
- /* Insert new strings in the hash table only if the match length is not
- * greater than this length. This saves time but degrades compression.
- * max_insert_length is used only for compression levels <= 3.
- */
-
- int level; /* compression level (1..9) */
- int strategy; /* favor or force Huffman coding*/
-
- uInt good_match;
- /* Use a faster search when the previous match is longer than this */
-
- int nice_match; /* Stop searching when current match exceeds this */
-
- /* used by trees.c: */
- /* Didn't use ct_data typedef below to suppress compiler warning */
- struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
- struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
- struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
-
- struct tree_desc_s l_desc; /* desc. for literal tree */
- struct tree_desc_s d_desc; /* desc. for distance tree */
- struct tree_desc_s bl_desc; /* desc. for bit length tree */
-
- ush bl_count[MAX_BITS+1];
- /* number of codes at each bit length for an optimal tree */
-
- int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
- int heap_len; /* number of elements in the heap */
- int heap_max; /* element of largest frequency */
- /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
- * The same heap array is used to build all trees.
- */
-
- uch depth[2*L_CODES+1];
- /* Depth of each subtree used as tie breaker for trees of equal frequency
- */
-
- uch *l_buf; /* buffer for literals or lengths */
-
- uInt lit_bufsize;
- /* Size of match buffer for literals/lengths. There are 4 reasons for
- * limiting lit_bufsize to 64K:
- * - frequencies can be kept in 16 bit counters
- * - if compression is not successful for the first block, all input
- * data is still in the window so we can still emit a stored block even
- * when input comes from standard input. (This can also be done for
- * all blocks if lit_bufsize is not greater than 32K.)
- * - if compression is not successful for a file smaller than 64K, we can
- * even emit a stored file instead of a stored block (saving 5 bytes).
- * This is applicable only for zip (not gzip or zlib).
- * - creating new Huffman trees less frequently may not provide fast
- * adaptation to changes in the input data statistics. (Take for
- * example a binary file with poorly compressible code followed by
- * a highly compressible string table.) Smaller buffer sizes give
- * fast adaptation but have of course the overhead of transmitting
- * trees more frequently.
- * - I can't count above 4
- */
-
- uInt last_lit; /* running index in l_buf */
-
- ush *d_buf;
- /* Buffer for distances. To simplify the code, d_buf and l_buf have
- * the same number of elements. To use different lengths, an extra flag
- * array would be necessary.
- */
-
- ulg opt_len; /* bit length of current block with optimal trees */
- ulg static_len; /* bit length of current block with static trees */
- ulg compressed_len; /* total bit length of compressed file */
- uInt matches; /* number of string matches in current block */
- int last_eob_len; /* bit length of EOB code for last block */
-
-#ifdef DEBUG_ZLIB
- ulg bits_sent; /* bit length of the compressed data */
-#endif
-
- ush bi_buf;
- /* Output buffer. bits are inserted starting at the bottom (least
- * significant bits).
- */
- int bi_valid;
- /* Number of valid bits in bi_buf. All bits above the last valid bit
- * are always zero.
- */
-
-} deflate_state;
-
-typedef struct deflate_workspace {
- /* State memory for the deflator */
- deflate_state deflate_memory;
- Byte *window_memory;
- Pos *prev_memory;
- Pos *head_memory;
- char *overlay_memory;
-} deflate_workspace;
-
-#define zlib_deflate_window_memsize(windowBits) \
- (2 * (1 << (windowBits)) * sizeof(Byte))
-#define zlib_deflate_prev_memsize(windowBits) \
- ((1 << (windowBits)) * sizeof(Pos))
-#define zlib_deflate_head_memsize(memLevel) \
- ((1 << ((memLevel)+7)) * sizeof(Pos))
-#define zlib_deflate_overlay_memsize(memLevel) \
- ((1 << ((memLevel)+6)) * (sizeof(ush)+2))
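-/* Illustrative totals at the zlib defaults (windowBits == 15, memLevel == 8),
- * assuming sizeof(Byte) == 1 and sizeof(Pos) == 2:
- *   window  : 2 * 32768 * 1   = 65536 bytes
- *   prev    : 32768 * 2       = 65536 bytes
- *   head    : (1 << 15) * 2   = 65536 bytes
- *   overlay : (1 << 14) * 4   = 65536 bytes
- */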
-
-/* Output a byte on the stream.
- * IN assertion: there is enough room in pending_buf.
- */
-#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
-
-
-#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
-/* Minimum amount of lookahead, except at the end of the input file.
- * See deflate.c for comments about the MIN_MATCH+1.
- */
-
-#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
-/* In order to simplify the code, particularly on 16 bit machines, match
- * distances are limited to MAX_DIST instead of WSIZE.
- */
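-/* With MAX_MATCH == 258, MIN_MATCH == 3 and the default w_size of 32768,
- * MIN_LOOKAHEAD is 258 + 3 + 1 = 262 and MAX_DIST(s) is 32768 - 262 = 32506.
- */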
-
- /* in trees.c */
-void zlib_tr_init (deflate_state *s);
-int zlib_tr_tally (deflate_state *s, unsigned dist, unsigned lc);
-ulg zlib_tr_flush_block (deflate_state *s, char *buf, ulg stored_len,
- int eof);
-void zlib_tr_align (deflate_state *s);
-void zlib_tr_stored_block (deflate_state *s, char *buf, ulg stored_len,
- int eof);
-void zlib_tr_stored_type_only (deflate_state *);
-
-
-/* ===========================================================================
- * Output a short LSB first on the stream.
- * IN assertion: there is enough room in pendingBuf.
- */
-#define put_short(s, w) { \
- put_byte(s, (uch)((w) & 0xff)); \
- put_byte(s, (uch)((ush)(w) >> 8)); \
-}
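-/* For example, put_short(s, 0x1234) emits the byte 0x34, then 0x12. */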
-
-/* ===========================================================================
- * Flush the bit buffer, keeping at most 7 bits in it.
- */
-static inline void bi_flush(deflate_state *s)
-{
- if (s->bi_valid == 16) {
- put_short(s, s->bi_buf);
- s->bi_buf = 0;
- s->bi_valid = 0;
- } else if (s->bi_valid >= 8) {
- put_byte(s, (Byte)s->bi_buf);
- s->bi_buf >>= 8;
- s->bi_valid -= 8;
- }
-}
-
-/* ===========================================================================
- * Flush the bit buffer and align the output on a byte boundary
- */
-static inline void bi_windup(deflate_state *s)
-{
- if (s->bi_valid > 8) {
- put_short(s, s->bi_buf);
- } else if (s->bi_valid > 0) {
- put_byte(s, (Byte)s->bi_buf);
- }
- s->bi_buf = 0;
- s->bi_valid = 0;
-#ifdef DEBUG_ZLIB
- s->bits_sent = (s->bits_sent+7) & ~7;
-#endif
-}
-
diff --git a/linux/zlib_inflate/inffast.c b/linux/zlib_inflate/inffast.c
deleted file mode 100644
index 2c13ecc..0000000
--- a/linux/zlib_inflate/inffast.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/* inffast.c -- fast decoding
- * Copyright (C) 1995-2004 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-#include <linux/zutil.h>
-#include "inftrees.h"
-#include "inflate.h"
-#include "inffast.h"
-
-#ifndef ASMINF
-
-/* Allow machine dependent optimization for post-increment or pre-increment.
- Based on testing to date,
- Pre-increment preferred for:
- - PowerPC G3 (Adler)
- - MIPS R5000 (Randers-Pehrson)
- Post-increment preferred for:
- - none
- No measurable difference:
- - Pentium III (Anderson)
- - M68060 (Nikl)
- */
-union uu {
- unsigned short us;
- unsigned char b[2];
-};
-
-/* Endianness-independent version */
-static inline unsigned short
-get_unaligned16(const unsigned short *p)
-{
- union uu mm;
- unsigned char *b = (unsigned char *)p;
-
- mm.b[0] = b[0];
- mm.b[1] = b[1];
- return mm.us;
-}
-
-#ifdef POSTINC
-# define OFF 0
-# define PUP(a) *(a)++
-# define UP_UNALIGNED(a) get_unaligned16((a)++)
-#else
-# define OFF 1
-# define PUP(a) *++(a)
-# define UP_UNALIGNED(a) get_unaligned16(++(a))
-#endif
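-/* Net effect: with pre-increment (the default), the pointer copies below are
- * biased down by one element (e.g. in = strm->next_in - OFF) so that *++(p)
- * touches the same bytes that *(p)++ would in a POSTINC build.
- */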
-
-/*
- Decode literal, length, and distance codes and write out the resulting
- literal and match bytes until either not enough input or output is
- available, an end-of-block is encountered, or a data error is encountered.
- When large enough input and output buffers are supplied to inflate(), for
- example, a 16K input buffer and a 64K output buffer, more than 95% of the
- inflate execution time is spent in this routine.
-
- Entry assumptions:
-
- state->mode == LEN
- strm->avail_in >= 6
- strm->avail_out >= 258
- start >= strm->avail_out
- state->bits < 8
-
- On return, state->mode is one of:
-
- LEN -- ran out of enough output space or enough available input
- TYPE -- reached end of block code, inflate() to interpret next block
- BAD -- error in block data
-
- Notes:
-
- - The maximum input bits used by a length/distance pair is 15 bits for the
- length code, 5 bits for the length extra, 15 bits for the distance code,
- and 13 bits for the distance extra. This totals 48 bits, or six bytes.
- Therefore if strm->avail_in >= 6, then there is enough input to avoid
- checking for available input while decoding.
-
- - The maximum bytes that a single length/distance pair can output is 258
- bytes, which is the maximum length that can be coded. inflate_fast()
- requires strm->avail_out >= 258 for each loop to avoid checking for
- output space.
-
- - @start: inflate()'s starting value for strm->avail_out
- */
-void inflate_fast(z_streamp strm, unsigned start)
-{
- struct inflate_state *state;
- const unsigned char *in; /* local strm->next_in */
- const unsigned char *last; /* while in < last, enough input available */
- unsigned char *out; /* local strm->next_out */
- unsigned char *beg; /* inflate()'s initial strm->next_out */
- unsigned char *end; /* while out < end, enough space available */
-#ifdef INFLATE_STRICT
- unsigned dmax; /* maximum distance from zlib header */
-#endif
- unsigned wsize; /* window size or zero if not using window */
- unsigned whave; /* valid bytes in the window */
- unsigned write; /* window write index */
- unsigned char *window; /* allocated sliding window, if wsize != 0 */
- unsigned long hold; /* local strm->hold */
- unsigned bits; /* local strm->bits */
- code const *lcode; /* local strm->lencode */
- code const *dcode; /* local strm->distcode */
- unsigned lmask; /* mask for first level of length codes */
- unsigned dmask; /* mask for first level of distance codes */
- code this; /* retrieved table entry */
- unsigned op; /* code bits, operation, extra bits, or */
- /* window position, window bytes to copy */
- unsigned len; /* match length, unused bytes */
- unsigned dist; /* match distance */
- unsigned char *from; /* where to copy match from */
-
- /* copy state to local variables */
- state = (struct inflate_state *)strm->state;
- in = strm->next_in - OFF;
- last = in + (strm->avail_in - 5);
- out = strm->next_out - OFF;
- beg = out - (start - strm->avail_out);
- end = out + (strm->avail_out - 257);
-#ifdef INFLATE_STRICT
- dmax = state->dmax;
-#endif
- wsize = state->wsize;
- whave = state->whave;
- write = state->write;
- window = state->window;
- hold = state->hold;
- bits = state->bits;
- lcode = state->lencode;
- dcode = state->distcode;
- lmask = (1U << state->lenbits) - 1;
- dmask = (1U << state->distbits) - 1;
-
- /* decode literals and length/distances until end-of-block or not enough
- input data or output space */
- do {
- if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
- bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
- bits += 8;
- }
- this = lcode[hold & lmask];
- dolen:
- op = (unsigned)(this.bits);
- hold >>= op;
- bits -= op;
- op = (unsigned)(this.op);
- if (op == 0) { /* literal */
- PUP(out) = (unsigned char)(this.val);
- }
- else if (op & 16) { /* length base */
- len = (unsigned)(this.val);
- op &= 15; /* number of extra bits */
- if (op) {
- if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
- bits += 8;
- }
- len += (unsigned)hold & ((1U << op) - 1);
- hold >>= op;
- bits -= op;
- }
- if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
- bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
- bits += 8;
- }
- this = dcode[hold & dmask];
- dodist:
- op = (unsigned)(this.bits);
- hold >>= op;
- bits -= op;
- op = (unsigned)(this.op);
- if (op & 16) { /* distance base */
- dist = (unsigned)(this.val);
- op &= 15; /* number of extra bits */
- if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
- bits += 8;
- if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
- bits += 8;
- }
- }
- dist += (unsigned)hold & ((1U << op) - 1);
-#ifdef INFLATE_STRICT
- if (dist > dmax) {
- strm->msg = (char *)"invalid distance too far back";
- state->mode = BAD;
- break;
- }
-#endif
- hold >>= op;
- bits -= op;
- op = (unsigned)(out - beg); /* max distance in output */
- if (dist > op) { /* see if copy from window */
- op = dist - op; /* distance back in window */
- if (op > whave) {
- strm->msg = (char *)"invalid distance too far back";
- state->mode = BAD;
- break;
- }
- from = window - OFF;
- if (write == 0) { /* very common case */
- from += wsize - op;
- if (op < len) { /* some from window */
- len -= op;
- do {
- PUP(out) = PUP(from);
- } while (--op);
- from = out - dist; /* rest from output */
- }
- }
- else if (write < op) { /* wrap around window */
- from += wsize + write - op;
- op -= write;
- if (op < len) { /* some from end of window */
- len -= op;
- do {
- PUP(out) = PUP(from);
- } while (--op);
- from = window - OFF;
- if (write < len) { /* some from start of window */
- op = write;
- len -= op;
- do {
- PUP(out) = PUP(from);
- } while (--op);
- from = out - dist; /* rest from output */
- }
- }
- }
- else { /* contiguous in window */
- from += write - op;
- if (op < len) { /* some from window */
- len -= op;
- do {
- PUP(out) = PUP(from);
- } while (--op);
- from = out - dist; /* rest from output */
- }
- }
- while (len > 2) {
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
- len -= 3;
- }
- if (len) {
- PUP(out) = PUP(from);
- if (len > 1)
- PUP(out) = PUP(from);
- }
- }
- else {
- unsigned short *sout;
- unsigned long loops;
-
- from = out - dist; /* copy direct from output */
- /* minimum length is three */
- /* Align out addr */
- if (!((long)(out - 1 + OFF) & 1)) {
- PUP(out) = PUP(from);
- len--;
- }
- sout = (unsigned short *)(out - OFF);
- if (dist > 2) {
- unsigned short *sfrom;
-
- sfrom = (unsigned short *)(from - OFF);
- loops = len >> 1;
- do
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- PUP(sout) = PUP(sfrom);
-#else
- PUP(sout) = UP_UNALIGNED(sfrom);
-#endif
- while (--loops);
- out = (unsigned char *)sout + OFF;
- from = (unsigned char *)sfrom + OFF;
- } else { /* dist == 1 or dist == 2 */
- unsigned short pat16;
-
- pat16 = *(sout-1+OFF);
- if (dist == 1) {
- union uu mm;
- /* copy one char pattern to both bytes */
- mm.us = pat16;
- mm.b[0] = mm.b[1];
- pat16 = mm.us;
- }
- loops = len >> 1;
- do
- PUP(sout) = pat16;
- while (--loops);
- out = (unsigned char *)sout + OFF;
- }
- if (len & 1)
- PUP(out) = PUP(from);
- }
- }
- else if ((op & 64) == 0) { /* 2nd level distance code */
- this = dcode[this.val + (hold & ((1U << op) - 1))];
- goto dodist;
- }
- else {
- strm->msg = (char *)"invalid distance code";
- state->mode = BAD;
- break;
- }
- }
- else if ((op & 64) == 0) { /* 2nd level length code */
- this = lcode[this.val + (hold & ((1U << op) - 1))];
- goto dolen;
- }
- else if (op & 32) { /* end-of-block */
- state->mode = TYPE;
- break;
- }
- else {
- strm->msg = (char *)"invalid literal/length code";
- state->mode = BAD;
- break;
- }
- } while (in < last && out < end);
-
- /* return unused bytes (on entry, bits < 8, so in won't go too far back) */
- len = bits >> 3;
- in -= len;
- bits -= len << 3;
- hold &= (1U << bits) - 1;
-
- /* update state and return */
- strm->next_in = in + OFF;
- strm->next_out = out + OFF;
- strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
- strm->avail_out = (unsigned)(out < end ?
- 257 + (end - out) : 257 - (out - end));
- state->hold = hold;
- state->bits = bits;
- return;
-}
-
-/*
- inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe):
- - Using bit fields for code structure
- - Different op definition to avoid & for extra bits (do & for table bits)
- - Three separate decoding do-loops for direct, window, and write == 0
- - Special case for distance > 1 copies to do overlapped load and store copy
- - Explicit branch predictions (based on measured branch probabilities)
- - Deferring match copy and interspersing it with decoding subsequent codes
- - Swapping literal/length else
- - Swapping window/direct else
- - Larger unrolled copy loops (three is about right)
- - Moving len -= 3 statement into middle of loop
- */
-
-#endif /* !ASMINF */
diff --git a/linux/zlib_inflate/inffast.h b/linux/zlib_inflate/inffast.h
deleted file mode 100644
index 40315d9..0000000
--- a/linux/zlib_inflate/inffast.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* inffast.h -- header to use inffast.c
- * Copyright (C) 1995-2003 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-void inflate_fast (z_streamp strm, unsigned start);
diff --git a/linux/zlib_inflate/inffixed.h b/linux/zlib_inflate/inffixed.h
deleted file mode 100644
index 75ed4b5..0000000
--- a/linux/zlib_inflate/inffixed.h
+++ /dev/null
@@ -1,94 +0,0 @@
- /* inffixed.h -- table for decoding fixed codes
- * Generated automatically by makefixed().
- */
-
- /* WARNING: this file should *not* be used by applications. It
- is part of the implementation of the compression library and
- is subject to change. Applications should only use zlib.h.
- */
-
- static const code lenfix[512] = {
- {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48},
- {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128},
- {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59},
- {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176},
- {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20},
- {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100},
- {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8},
- {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216},
- {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76},
- {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114},
- {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2},
- {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148},
- {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42},
- {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86},
- {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15},
- {0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236},
- {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62},
- {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142},
- {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31},
- {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162},
- {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25},
- {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105},
- {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4},
- {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202},
- {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69},
- {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125},
- {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13},
- {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195},
- {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35},
- {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91},
- {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19},
- {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246},
- {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55},
- {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135},
- {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99},
- {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190},
- {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16},
- {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96},
- {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6},
- {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209},
- {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72},
- {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116},
- {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4},
- {0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153},
- {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44},
- {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82},
- {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11},
- {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229},
- {16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58},
- {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138},
- {0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51},
- {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173},
- {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30},
- {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110},
- {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0},
- {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195},
- {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65},
- {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121},
- {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9},
- {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258},
- {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37},
- {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93},
- {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23},
- {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251},
- {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51},
- {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131},
- {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67},
- {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183},
- {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23},
- {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103},
- {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9},
- {0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223},
- {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79},
- {0,9,255}
- };
-
- static const code distfix[32] = {
- {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025},
- {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193},
- {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385},
- {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577},
- {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073},
- {22,5,193},{64,5,0}
- };
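- /* Reading the triples (a sketch, per the code struct fields op, bits, val
-  * in inftrees.h): {0,8,80} is an 8-bit literal code for byte value 80;
-  * in distfix, {16,5,1} is a 5-bit code for distance base 1 with 16&15 == 0
-  * extra bits, {27,5,4097} is base 4097 with 27&15 == 11 extra bits, and
-  * {64,5,0} marks an invalid code.
-  */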
diff --git a/linux/zlib_inflate/inflate.c b/linux/zlib_inflate/inflate.c
deleted file mode 100644
index 58a733b..0000000
--- a/linux/zlib_inflate/inflate.c
+++ /dev/null
@@ -1,786 +0,0 @@
-/* inflate.c -- zlib decompression
- * Copyright (C) 1995-2005 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- *
- * Based on zlib 1.2.3 but modified for the Linux Kernel by
- * Richard Purdie <richard@openedhand.com>
- *
- * Changes mainly for static instead of dynamic memory allocation
- *
- */
-
-#include <linux/zutil.h>
-#include "inftrees.h"
-#include "inflate.h"
-#include "inffast.h"
-#include "infutil.h"
-
-int zlib_inflate_workspacesize(void)
-{
- return sizeof(struct inflate_workspace);
-}
-
-int zlib_inflateReset(z_streamp strm)
-{
- struct inflate_state *state;
-
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- state = (struct inflate_state *)strm->state;
- strm->total_in = strm->total_out = state->total = 0;
- strm->msg = NULL;
- strm->adler = 1; /* to support ill-conceived Java test suite */
- state->mode = HEAD;
- state->last = 0;
- state->havedict = 0;
- state->dmax = 32768U;
- state->hold = 0;
- state->bits = 0;
- state->lencode = state->distcode = state->next = state->codes;
-
- /* Initialise Window */
- state->wsize = 1U << state->wbits;
- state->write = 0;
- state->whave = 0;
-
- return Z_OK;
-}
-
-int zlib_inflateInit2(z_streamp strm, int windowBits)
-{
- struct inflate_state *state;
-
- if (strm == NULL) return Z_STREAM_ERROR;
- strm->msg = NULL; /* in case we return an error */
-
- state = &WS(strm)->inflate_state;
- strm->state = (struct internal_state *)state;
-
- if (windowBits < 0) {
- state->wrap = 0;
- windowBits = -windowBits;
- }
- else {
- state->wrap = (windowBits >> 4) + 1;
- }
- if (windowBits < 8 || windowBits > 15) {
- return Z_STREAM_ERROR;
- }
- state->wbits = (unsigned)windowBits;
- state->window = &WS(strm)->working_window[0];
-
- return zlib_inflateReset(strm);
-}
-
-/*
- Return state with length and distance decoding tables and index sizes set to
- fixed code decoding. This returns fixed tables from inffixed.h.
- */
-static void zlib_fixedtables(struct inflate_state *state)
-{
-# include "inffixed.h"
- state->lencode = lenfix;
- state->lenbits = 9;
- state->distcode = distfix;
- state->distbits = 5;
-}
-
-
-/*
- Update the window with the last wsize (normally 32K) bytes written before
- returning. This is only called when a window is already in use, or when
- output has been written during this inflate call, but the end of the deflate
- stream has not been reached yet. It is also called to window dictionary data
- when a dictionary is loaded.
-
- Providing output buffers larger than 32K to inflate() should provide a speed
- advantage, since only the last 32K of output is copied to the sliding window
- upon return from inflate(), and since all distances after the first 32K of
- output will fall in the output data, making match copies simpler and faster.
- The advantage may be dependent on the size of the processor's data caches.
- */
-static void zlib_updatewindow(z_streamp strm, unsigned out)
-{
- struct inflate_state *state;
- unsigned copy, dist;
-
- state = (struct inflate_state *)strm->state;
-
- /* copy state->wsize or less output bytes into the circular window */
- copy = out - strm->avail_out;
- if (copy >= state->wsize) {
- memcpy(state->window, strm->next_out - state->wsize, state->wsize);
- state->write = 0;
- state->whave = state->wsize;
- }
- else {
- dist = state->wsize - state->write;
- if (dist > copy) dist = copy;
- memcpy(state->window + state->write, strm->next_out - copy, dist);
- copy -= dist;
- if (copy) {
- memcpy(state->window, strm->next_out - copy, copy);
- state->write = copy;
- state->whave = state->wsize;
- }
- else {
- state->write += dist;
- if (state->write == state->wsize) state->write = 0;
- if (state->whave < state->wsize) state->whave += dist;
- }
- }
-}
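-/* A worked (illustrative) case of the wrap-around path above: with
- * wsize == 32768, write == 32500 and copy == 1000, dist is 268, so 268 bytes
- * land at offsets 32500..32767, the remaining 732 wrap to offset 0, and
- * write becomes 732.
- */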
-
-
-/*
- * At the end of a Deflate-compressed PPP packet, we expect to have seen
- * a `stored' block type value but not the (zero) length bytes.
- */
-/*
- Returns true if inflate is currently at the end of a block generated by
- Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
- implementation to provide an additional safety check. PPP uses
- Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
- block. When decompressing, PPP checks that at the end of input packet,
- inflate is waiting for these length bytes.
- */
-static int zlib_inflateSyncPacket(z_streamp strm)
-{
- struct inflate_state *state;
-
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- state = (struct inflate_state *)strm->state;
-
- if (state->mode == STORED && state->bits == 0) {
- state->mode = TYPE;
- return Z_OK;
- }
- return Z_DATA_ERROR;
-}
-
-/* Macros for inflate(): */
-
-/* check function to use adler32() for zlib or crc32() for gzip */
-#define UPDATE(check, buf, len) zlib_adler32(check, buf, len)
-
-/* Load registers with state in inflate() for speed */
-#define LOAD() \
- do { \
- put = strm->next_out; \
- left = strm->avail_out; \
- next = strm->next_in; \
- have = strm->avail_in; \
- hold = state->hold; \
- bits = state->bits; \
- } while (0)
-
-/* Restore state from registers in inflate() */
-#define RESTORE() \
- do { \
- strm->next_out = put; \
- strm->avail_out = left; \
- strm->next_in = next; \
- strm->avail_in = have; \
- state->hold = hold; \
- state->bits = bits; \
- } while (0)
-
-/* Clear the input bit accumulator */
-#define INITBITS() \
- do { \
- hold = 0; \
- bits = 0; \
- } while (0)
-
-/* Get a byte of input into the bit accumulator, or return from inflate()
- if there is no input available. */
-#define PULLBYTE() \
- do { \
- if (have == 0) goto inf_leave; \
- have--; \
- hold += (unsigned long)(*next++) << bits; \
- bits += 8; \
- } while (0)
-
-/* Assure that there are at least n bits in the bit accumulator. If there is
- not enough available input to do that, then return from inflate(). */
-#define NEEDBITS(n) \
- do { \
- while (bits < (unsigned)(n)) \
- PULLBYTE(); \
- } while (0)
-
-/* Return the low n bits of the bit accumulator (n < 16) */
-#define BITS(n) \
- ((unsigned)hold & ((1U << (n)) - 1))
-
-/* Remove n bits from the bit accumulator */
-#define DROPBITS(n) \
- do { \
- hold >>= (n); \
- bits -= (unsigned)(n); \
- } while (0)
-
-/* Remove zero to seven bits as needed to go to a byte boundary */
-#define BYTEBITS() \
- do { \
- hold >>= bits & 7; \
- bits -= bits & 7; \
- } while (0)
-
-/* Reverse the bytes in a 32-bit value */
-#define REVERSE(q) \
- ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
- (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
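-/* e.g. REVERSE(0x12345678UL) == 0x78563412UL */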
-
-/*
- inflate() uses a state machine to process as much input data and generate as
- much output data as possible before returning. The state machine is
- structured roughly as follows:
-
- for (;;) switch (state) {
- ...
- case STATEn:
- if (not enough input data or output space to make progress)
- return;
- ... make progress ...
- state = STATEm;
- break;
- ...
- }
-
- so when inflate() is called again, the same case is attempted again, and
- if the appropriate resources are provided, the machine proceeds to the
- next state. The NEEDBITS() macro is usually the way the state evaluates
- whether it can proceed or should return. NEEDBITS() does the return if
- the requested bits are not available. The typical use of the BITS macros
- is:
-
- NEEDBITS(n);
- ... do something with BITS(n) ...
- DROPBITS(n);
-
- where NEEDBITS(n) either returns from inflate() if there isn't enough
- input left to load n bits into the accumulator, or it continues. BITS(n)
- gives the low n bits in the accumulator. When done, DROPBITS(n) drops
- the low n bits off the accumulator. INITBITS() clears the accumulator
- and sets the number of available bits to zero. BYTEBITS() discards just
- enough bits to put the accumulator on a byte boundary. After BYTEBITS()
- and a NEEDBITS(8), then BITS(8) would return the next byte in the stream.
-
- NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return
- if there is no input available. The decoding of variable length codes uses
- PULLBYTE() directly in order to pull just enough bytes to decode the next
- code, and no more.
-
- Some states loop until they get enough input, making sure that enough
- state information is maintained to continue the loop where it left off
- if NEEDBITS() returns in the loop. For example, want, need, and keep
- would all have to actually be part of the saved state in case NEEDBITS()
- returns:
-
- case STATEw:
- while (want < need) {
- NEEDBITS(n);
- keep[want++] = BITS(n);
- DROPBITS(n);
- }
- state = STATEx;
- case STATEx:
-
- As shown above, if the next state is also the next case, then the break
- is omitted.
-
- A state may also return if there is not enough output space available to
- complete that state. Those states are copying stored data, writing a
- literal byte, and copying a matching string.
-
- When returning, a "goto inf_leave" is used to update the total counters,
- update the check value, and determine whether any progress has been made
- during that inflate() call in order to return the proper return code.
- Progress is defined as a change in either strm->avail_in or strm->avail_out.
- When there is a window, goto inf_leave will update the window with the last
- output written. If a goto inf_leave occurs in the middle of decompression
- and there is no window currently, goto inf_leave will create one and copy
- output to the window for the next call of inflate().
-
- In this implementation, the flush parameter of inflate() only affects the
- return code (per zlib.h). inflate() always writes as much as possible to
- strm->next_out, given the space available and the provided input--the effect
- documented in zlib.h for Z_SYNC_FLUSH. Furthermore, inflate() always defers
- the allocation of and copying into a sliding window until necessary, which
- provides the effect documented in zlib.h for Z_FINISH when the entire input
- stream is available. So the only thing the flush parameter actually does is:
- when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it
- will return Z_BUF_ERROR if it has not reached the end of the stream.
- */
-
-int zlib_inflate(z_streamp strm, int flush)
-{
- struct inflate_state *state;
- const unsigned char *next; /* next input */
- unsigned char *put; /* next output */
- unsigned have, left; /* available input and output */
- unsigned long hold; /* bit buffer */
- unsigned bits; /* bits in bit buffer */
- unsigned in, out; /* save starting available input and output */
- unsigned copy; /* number of stored or match bytes to copy */
- unsigned char *from; /* where to copy match bytes from */
- code this; /* current decoding table entry */
- code last; /* parent table entry */
- unsigned len; /* length to copy for repeats, bits to drop */
- int ret; /* return code */
- static const unsigned short order[19] = /* permutation of code lengths */
- {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
-
- /* Do not check for strm->next_out == NULL here as ppc zImage
- inflates to strm->next_out = 0 */
-
- if (strm == NULL || strm->state == NULL ||
- (strm->next_in == NULL && strm->avail_in != 0))
- return Z_STREAM_ERROR;
-
- state = (struct inflate_state *)strm->state;
-
- if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
- LOAD();
- in = have;
- out = left;
- ret = Z_OK;
- for (;;)
- switch (state->mode) {
- case HEAD:
- if (state->wrap == 0) {
- state->mode = TYPEDO;
- break;
- }
- NEEDBITS(16);
- if (
- ((BITS(8) << 8) + (hold >> 8)) % 31) {
- strm->msg = (char *)"incorrect header check";
- state->mode = BAD;
- break;
- }
- if (BITS(4) != Z_DEFLATED) {
- strm->msg = (char *)"unknown compression method";
- state->mode = BAD;
- break;
- }
- DROPBITS(4);
- len = BITS(4) + 8;
- if (len > state->wbits) {
- strm->msg = (char *)"invalid window size";
- state->mode = BAD;
- break;
- }
- state->dmax = 1U << len;
- strm->adler = state->check = zlib_adler32(0L, NULL, 0);
- state->mode = hold & 0x200 ? DICTID : TYPE;
- INITBITS();
- break;
- case DICTID:
- NEEDBITS(32);
- strm->adler = state->check = REVERSE(hold);
- INITBITS();
- state->mode = DICT;
- case DICT:
- if (state->havedict == 0) {
- RESTORE();
- return Z_NEED_DICT;
- }
- strm->adler = state->check = zlib_adler32(0L, NULL, 0);
- state->mode = TYPE;
- case TYPE:
- if (flush == Z_BLOCK) goto inf_leave;
- case TYPEDO:
- if (state->last) {
- BYTEBITS();
- state->mode = CHECK;
- break;
- }
- NEEDBITS(3);
- state->last = BITS(1);
- DROPBITS(1);
- switch (BITS(2)) {
- case 0: /* stored block */
- state->mode = STORED;
- break;
- case 1: /* fixed block */
- zlib_fixedtables(state);
- state->mode = LEN; /* decode codes */
- break;
- case 2: /* dynamic block */
- state->mode = TABLE;
- break;
- case 3:
- strm->msg = (char *)"invalid block type";
- state->mode = BAD;
- }
- DROPBITS(2);
- break;
- case STORED:
- BYTEBITS(); /* go to byte boundary */
- NEEDBITS(32);
- if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
- strm->msg = (char *)"invalid stored block lengths";
- state->mode = BAD;
- break;
- }
- state->length = (unsigned)hold & 0xffff;
- INITBITS();
- state->mode = COPY;
- case COPY:
- copy = state->length;
- if (copy) {
- if (copy > have) copy = have;
- if (copy > left) copy = left;
- if (copy == 0) goto inf_leave;
- memcpy(put, next, copy);
- have -= copy;
- next += copy;
- left -= copy;
- put += copy;
- state->length -= copy;
- break;
- }
- state->mode = TYPE;
- break;
- case TABLE:
- NEEDBITS(14);
- state->nlen = BITS(5) + 257;
- DROPBITS(5);
- state->ndist = BITS(5) + 1;
- DROPBITS(5);
- state->ncode = BITS(4) + 4;
- DROPBITS(4);
-#ifndef PKZIP_BUG_WORKAROUND
- if (state->nlen > 286 || state->ndist > 30) {
- strm->msg = (char *)"too many length or distance symbols";
- state->mode = BAD;
- break;
- }
-#endif
- state->have = 0;
- state->mode = LENLENS;
- case LENLENS:
- while (state->have < state->ncode) {
- NEEDBITS(3);
- state->lens[order[state->have++]] = (unsigned short)BITS(3);
- DROPBITS(3);
- }
- while (state->have < 19)
- state->lens[order[state->have++]] = 0;
- state->next = state->codes;
- state->lencode = (code const *)(state->next);
- state->lenbits = 7;
- ret = zlib_inflate_table(CODES, state->lens, 19, &(state->next),
- &(state->lenbits), state->work);
- if (ret) {
- strm->msg = (char *)"invalid code lengths set";
- state->mode = BAD;
- break;
- }
- state->have = 0;
- state->mode = CODELENS;
- case CODELENS:
- while (state->have < state->nlen + state->ndist) {
- for (;;) {
- this = state->lencode[BITS(state->lenbits)];
- if ((unsigned)(this.bits) <= bits) break;
- PULLBYTE();
- }
- if (this.val < 16) {
- NEEDBITS(this.bits);
- DROPBITS(this.bits);
- state->lens[state->have++] = this.val;
- }
- else {
- if (this.val == 16) {
- NEEDBITS(this.bits + 2);
- DROPBITS(this.bits);
- if (state->have == 0) {
- strm->msg = (char *)"invalid bit length repeat";
- state->mode = BAD;
- break;
- }
- len = state->lens[state->have - 1];
- copy = 3 + BITS(2);
- DROPBITS(2);
- }
- else if (this.val == 17) {
- NEEDBITS(this.bits + 3);
- DROPBITS(this.bits);
- len = 0;
- copy = 3 + BITS(3);
- DROPBITS(3);
- }
- else {
- NEEDBITS(this.bits + 7);
- DROPBITS(this.bits);
- len = 0;
- copy = 11 + BITS(7);
- DROPBITS(7);
- }
- if (state->have + copy > state->nlen + state->ndist) {
- strm->msg = (char *)"invalid bit length repeat";
- state->mode = BAD;
- break;
- }
- while (copy--)
- state->lens[state->have++] = (unsigned short)len;
- }
- }
-
- /* handle error breaks in while */
- if (state->mode == BAD) break;
-
- /* build code tables */
- state->next = state->codes;
- state->lencode = (code const *)(state->next);
- state->lenbits = 9;
- ret = zlib_inflate_table(LENS, state->lens, state->nlen, &(state->next),
- &(state->lenbits), state->work);
- if (ret) {
- strm->msg = (char *)"invalid literal/lengths set";
- state->mode = BAD;
- break;
- }
- state->distcode = (code const *)(state->next);
- state->distbits = 6;
- ret = zlib_inflate_table(DISTS, state->lens + state->nlen, state->ndist,
- &(state->next), &(state->distbits), state->work);
- if (ret) {
- strm->msg = (char *)"invalid distances set";
- state->mode = BAD;
- break;
- }
- state->mode = LEN;
- case LEN:
- if (have >= 6 && left >= 258) {
- RESTORE();
- inflate_fast(strm, out);
- LOAD();
- break;
- }
- for (;;) {
- this = state->lencode[BITS(state->lenbits)];
- if ((unsigned)(this.bits) <= bits) break;
- PULLBYTE();
- }
- if (this.op && (this.op & 0xf0) == 0) {
- last = this;
- for (;;) {
- this = state->lencode[last.val +
- (BITS(last.bits + last.op) >> last.bits)];
- if ((unsigned)(last.bits + this.bits) <= bits) break;
- PULLBYTE();
- }
- DROPBITS(last.bits);
- }
- DROPBITS(this.bits);
- state->length = (unsigned)this.val;
- if ((int)(this.op) == 0) {
- state->mode = LIT;
- break;
- }
- if (this.op & 32) {
- state->mode = TYPE;
- break;
- }
- if (this.op & 64) {
- strm->msg = (char *)"invalid literal/length code";
- state->mode = BAD;
- break;
- }
- state->extra = (unsigned)(this.op) & 15;
- state->mode = LENEXT;
- case LENEXT:
- if (state->extra) {
- NEEDBITS(state->extra);
- state->length += BITS(state->extra);
- DROPBITS(state->extra);
- }
- state->mode = DIST;
- case DIST:
- for (;;) {
- this = state->distcode[BITS(state->distbits)];
- if ((unsigned)(this.bits) <= bits) break;
- PULLBYTE();
- }
- if ((this.op & 0xf0) == 0) {
- last = this;
- for (;;) {
- this = state->distcode[last.val +
- (BITS(last.bits + last.op) >> last.bits)];
- if ((unsigned)(last.bits + this.bits) <= bits) break;
- PULLBYTE();
- }
- DROPBITS(last.bits);
- }
- DROPBITS(this.bits);
- if (this.op & 64) {
- strm->msg = (char *)"invalid distance code";
- state->mode = BAD;
- break;
- }
- state->offset = (unsigned)this.val;
- state->extra = (unsigned)(this.op) & 15;
- state->mode = DISTEXT;
- case DISTEXT:
- if (state->extra) {
- NEEDBITS(state->extra);
- state->offset += BITS(state->extra);
- DROPBITS(state->extra);
- }
-#ifdef INFLATE_STRICT
- if (state->offset > state->dmax) {
- strm->msg = (char *)"invalid distance too far back";
- state->mode = BAD;
- break;
- }
-#endif
- if (state->offset > state->whave + out - left) {
- strm->msg = (char *)"invalid distance too far back";
- state->mode = BAD;
- break;
- }
- state->mode = MATCH;
- case MATCH:
- if (left == 0) goto inf_leave;
- copy = out - left;
- if (state->offset > copy) { /* copy from window */
- copy = state->offset - copy;
- if (copy > state->write) {
- copy -= state->write;
- from = state->window + (state->wsize - copy);
- }
- else
- from = state->window + (state->write - copy);
- if (copy > state->length) copy = state->length;
- }
- else { /* copy from output */
- from = put - state->offset;
- copy = state->length;
- }
- if (copy > left) copy = left;
- left -= copy;
- state->length -= copy;
- do {
- *put++ = *from++;
- } while (--copy);
- if (state->length == 0) state->mode = LEN;
- break;
- case LIT:
- if (left == 0) goto inf_leave;
- *put++ = (unsigned char)(state->length);
- left--;
- state->mode = LEN;
- break;
- case CHECK:
- if (state->wrap) {
- NEEDBITS(32);
- out -= left;
- strm->total_out += out;
- state->total += out;
- if (out)
- strm->adler = state->check =
- UPDATE(state->check, put - out, out);
- out = left;
- if ((REVERSE(hold)) != state->check) {
- strm->msg = (char *)"incorrect data check";
- state->mode = BAD;
- break;
- }
- INITBITS();
- }
- state->mode = DONE;
- case DONE:
- ret = Z_STREAM_END;
- goto inf_leave;
- case BAD:
- ret = Z_DATA_ERROR;
- goto inf_leave;
- case MEM:
- return Z_MEM_ERROR;
- case SYNC:
- default:
- return Z_STREAM_ERROR;
- }
-
- /*
- Return from inflate(), updating the total counts and the check value.
- If there was no progress during the inflate() call, return a buffer
- error. Call zlib_updatewindow() to create and/or update the window state.
- */
- inf_leave:
- RESTORE();
- if (state->wsize || (state->mode < CHECK && out != strm->avail_out))
- zlib_updatewindow(strm, out);
-
- in -= strm->avail_in;
- out -= strm->avail_out;
- strm->total_in += in;
- strm->total_out += out;
- state->total += out;
- if (state->wrap && out)
- strm->adler = state->check =
- UPDATE(state->check, strm->next_out - out, out);
-
- strm->data_type = state->bits + (state->last ? 64 : 0) +
- (state->mode == TYPE ? 128 : 0);
-
- if (flush == Z_PACKET_FLUSH && ret == Z_OK &&
- strm->avail_out != 0 && strm->avail_in == 0)
- return zlib_inflateSyncPacket(strm);
-
- if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
- ret = Z_BUF_ERROR;
-
- return ret;
-}
-
-int zlib_inflateEnd(z_streamp strm)
-{
- if (strm == NULL || strm->state == NULL)
- return Z_STREAM_ERROR;
- return Z_OK;
-}
-
-/*
- * This subroutine adds the data at next_in/avail_in to the output history
- * without performing any output. The output buffer must be "caught up";
- * i.e. no pending output (but this should always be the case). The state must
- * be waiting on the start of a block (i.e. mode == TYPE or HEAD). On exit,
- * the output will also be caught up, and the checksum will have been updated
- * if need be.
- */
-int zlib_inflateIncomp(z_stream *z)
-{
- struct inflate_state *state = (struct inflate_state *)z->state;
- Byte *saved_no = z->next_out;
- uInt saved_ao = z->avail_out;
-
- if (state->mode != TYPE && state->mode != HEAD)
- return Z_DATA_ERROR;
-
- /* Set up some variables to allow misuse of zlib_updatewindow() */
- z->avail_out = 0;
- z->next_out = (unsigned char*)z->next_in + z->avail_in;
-
- zlib_updatewindow(z, z->avail_in);
-
- /* Restore saved variables */
- z->avail_out = saved_ao;
- z->next_out = saved_no;
-
- z->adler = state->check =
- UPDATE(state->check, z->next_in, z->avail_in);
-
- z->total_out += z->avail_in;
- z->total_in += z->avail_in;
- z->next_in += z->avail_in;
- state->total += z->avail_in;
- z->avail_in = 0;
-
- return Z_OK;
-}
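The LEN and DIST cases in the hunk above share one idiom: probe a root table with lenbits/distbits worth of the bit accumulator, and if the entry is a link (nonzero op with a clear high nibble), probe the sub-table it points to using the bits beyond the root. A minimal sketch of that two-level lookup, assuming a locally defined entry type and that enough bits are already loaded (decode_two_level is an invented name, not this file's API):

#include <stdint.h>

struct tcode { unsigned char op, bits; unsigned short val; };

/* Probe the root table with the low lenbits of hold; follow a link
 * entry into its sub-table if needed. *consumed receives the total
 * bits to drop, matching the paired DROPBITS() calls above. */
static struct tcode decode_two_level(const struct tcode *table,
                                     unsigned lenbits, uint32_t hold,
                                     unsigned *consumed)
{
    struct tcode this = table[hold & ((1U << lenbits) - 1)];

    if (this.op != 0 && (this.op & 0xf0) == 0) {
        /* Link: low 4 bits of op = sub-table index width,
         * val = sub-table offset from the start of the table. */
        struct tcode last = this;

        this = table[last.val +
                     ((hold & ((1U << (last.bits + last.op)) - 1))
                      >> last.bits)];
        *consumed = last.bits + this.bits;
    } else {
        *consumed = this.bits;
    }
    return this;
}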
diff --git a/linux/zlib_inflate/inflate.h b/linux/zlib_inflate/inflate.h
deleted file mode 100644
index 3d17b3d..0000000
--- a/linux/zlib_inflate/inflate.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef INFLATE_H
-#define INFLATE_H
-
-/* inflate.h -- internal inflate state definition
- * Copyright (C) 1995-2004 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* Possible inflate modes between inflate() calls */
-typedef enum {
- HEAD, /* i: waiting for magic header */
- FLAGS, /* i: waiting for method and flags (gzip) */
- TIME, /* i: waiting for modification time (gzip) */
- OS, /* i: waiting for extra flags and operating system (gzip) */
- EXLEN, /* i: waiting for extra length (gzip) */
- EXTRA, /* i: waiting for extra bytes (gzip) */
- NAME, /* i: waiting for end of file name (gzip) */
- COMMENT, /* i: waiting for end of comment (gzip) */
- HCRC, /* i: waiting for header crc (gzip) */
- DICTID, /* i: waiting for dictionary check value */
- DICT, /* waiting for inflateSetDictionary() call */
- TYPE, /* i: waiting for type bits, including last-flag bit */
- TYPEDO, /* i: same, but skip check to exit inflate on new block */
- STORED, /* i: waiting for stored size (length and complement) */
- COPY, /* i/o: waiting for input or output to copy stored block */
- TABLE, /* i: waiting for dynamic block table lengths */
- LENLENS, /* i: waiting for code length code lengths */
- CODELENS, /* i: waiting for length/lit and distance code lengths */
- LEN, /* i: waiting for length/lit code */
- LENEXT, /* i: waiting for length extra bits */
- DIST, /* i: waiting for distance code */
- DISTEXT, /* i: waiting for distance extra bits */
- MATCH, /* o: waiting for output space to copy string */
- LIT, /* o: waiting for output space to write literal */
- CHECK, /* i: waiting for 32-bit check value */
- LENGTH, /* i: waiting for 32-bit length (gzip) */
- DONE, /* finished check, done -- remain here until reset */
- BAD, /* got a data error -- remain here until reset */
- MEM, /* got an inflate() memory error -- remain here until reset */
- SYNC /* looking for synchronization bytes to restart inflate() */
-} inflate_mode;
-
-/*
- State transitions between above modes -
-
- (most modes can go to the BAD or MEM mode -- not shown for clarity)
-
- Process header:
- HEAD -> (gzip) or (zlib)
- (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME
- NAME -> COMMENT -> HCRC -> TYPE
- (zlib) -> DICTID or TYPE
- DICTID -> DICT -> TYPE
- Read deflate blocks:
- TYPE -> STORED or TABLE or LEN or CHECK
- STORED -> COPY -> TYPE
- TABLE -> LENLENS -> CODELENS -> LEN
- Read deflate codes:
- LEN -> LENEXT or LIT or TYPE
- LENEXT -> DIST -> DISTEXT -> MATCH -> LEN
- LIT -> LEN
- Process trailer:
- CHECK -> LENGTH -> DONE
- */
-
- /* state maintained between inflate() calls. Approximately 9.5K bytes, dominated by codes[ENOUGH]. */
-struct inflate_state {
- inflate_mode mode; /* current inflate mode */
- int last; /* true if processing last block */
- int wrap; /* bit 0 true for zlib, bit 1 true for gzip */
- int havedict; /* true if dictionary provided */
- int flags; /* gzip header method and flags (0 if zlib) */
- unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */
- unsigned long check; /* protected copy of check value */
- unsigned long total; /* protected copy of output count */
- /* gz_headerp head; */ /* where to save gzip header information */
- /* sliding window */
- unsigned wbits; /* log base 2 of requested window size */
- unsigned wsize; /* window size or zero if not using window */
- unsigned whave; /* valid bytes in the window */
- unsigned write; /* window write index */
- unsigned char *window; /* allocated sliding window, if needed */
- /* bit accumulator */
- unsigned long hold; /* input bit accumulator */
- unsigned bits; /* number of bits in "in" */
- /* for string and stored block copying */
- unsigned length; /* literal or length of data to copy */
- unsigned offset; /* distance back to copy string from */
- /* for table and code decoding */
- unsigned extra; /* extra bits needed */
- /* fixed and dynamic code tables */
- code const *lencode; /* starting table for length/literal codes */
- code const *distcode; /* starting table for distance codes */
- unsigned lenbits; /* index bits for lencode */
- unsigned distbits; /* index bits for distcode */
- /* dynamic table building */
- unsigned ncode; /* number of code length code lengths */
- unsigned nlen; /* number of length code lengths */
- unsigned ndist; /* number of distance code lengths */
- unsigned have; /* number of code lengths in lens[] */
- code *next; /* next available space in codes[] */
- unsigned short lens[320]; /* temporary storage for code lengths */
- unsigned short work[288]; /* work area for code table building */
- code codes[ENOUGH]; /* space for code tables */
-};
-#endif
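The mode enum and the transition diagram above describe a resumable state machine: the switch cases in zlib_inflate() fall through to their successors, and any case may return early when input or output runs out, to be re-entered at the saved mode on the next call. A toy sketch of that suspend/resume pattern, with invented names (toy_mode, step) and a made-up byte protocol:

enum toy_mode { WANT_HDR, WANT_LEN, WANT_BODY, TOY_DONE };

struct toy_state {
    enum toy_mode mode;
    unsigned need;              /* bytes still owed to current mode */
};

/* Consume bytes from *in..end; return 0 to ask for more input,
 * 1 when the message is complete. Resumes at s->mode. */
static int step(struct toy_state *s, const unsigned char **in,
                const unsigned char *end)
{
    switch (s->mode) {
    case WANT_HDR:
        if (*in == end) return 0;       /* suspend; re-enter here */
        (*in)++;
        s->mode = WANT_LEN;
        /* fall through, like the cases in zlib_inflate() */
    case WANT_LEN:
        if (*in == end) return 0;
        s->need = *(*in)++;
        s->mode = WANT_BODY;
        /* fall through */
    case WANT_BODY:
        while (s->need) {
            if (*in == end) return 0;
            (*in)++;
            s->need--;
        }
        s->mode = TOY_DONE;
        /* fall through */
    case TOY_DONE:
        return 1;
    }
    return 1;
}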
diff --git a/linux/zlib_inflate/inftrees.c b/linux/zlib_inflate/inftrees.c
deleted file mode 100644
index 3fe6ce5..0000000
--- a/linux/zlib_inflate/inftrees.c
+++ /dev/null
@@ -1,315 +0,0 @@
-/* inftrees.c -- generate Huffman trees for efficient decoding
- * Copyright (C) 1995-2005 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-#include <linux/zutil.h>
-#include "inftrees.h"
-
-#define MAXBITS 15
-
-/*
- Build a set of tables to decode the provided canonical Huffman code.
- The code lengths are lens[0..codes-1]. The result starts at *table,
- whose indices are 0..2^bits-1. work is a writable array of at least
- lens shorts, which is used as a work area. type is the type of code
- to be generated, CODES, LENS, or DISTS. On return, zero is success,
- -1 is an invalid code, and +1 means that ENOUGH isn't enough. table
- on return points to the next available entry's address. bits is the
- requested root table index bits, and on return it is the actual root
- table index bits. It will differ if the request is greater than the
- longest code or if it is less than the shortest code.
- */
-int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
- code **table, unsigned *bits, unsigned short *work)
-{
- unsigned len; /* a code's length in bits */
- unsigned sym; /* index of code symbols */
- unsigned min, max; /* minimum and maximum code lengths */
- unsigned root; /* number of index bits for root table */
- unsigned curr; /* number of index bits for current table */
- unsigned drop; /* code bits to drop for sub-table */
- int left; /* number of prefix codes available */
- unsigned used; /* code entries in table used */
- unsigned huff; /* Huffman code */
- unsigned incr; /* for incrementing code, index */
- unsigned fill; /* index for replicating entries */
- unsigned low; /* low bits for current root entry */
- unsigned mask; /* mask for low root bits */
- code this; /* table entry for duplication */
- code *next; /* next available space in table */
- const unsigned short *base; /* base value table to use */
- const unsigned short *extra; /* extra bits table to use */
- int end; /* use base and extra for symbol > end */
- unsigned short count[MAXBITS+1]; /* number of codes of each length */
- unsigned short offs[MAXBITS+1]; /* offsets in table for each length */
- static const unsigned short lbase[31] = { /* Length codes 257..285 base */
- 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
- 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
- static const unsigned short lext[31] = { /* Length codes 257..285 extra */
- 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
- 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 201, 196};
- static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
- 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
- 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
- 8193, 12289, 16385, 24577, 0, 0};
- static const unsigned short dext[32] = { /* Distance codes 0..29 extra */
- 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
- 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
- 28, 28, 29, 29, 64, 64};
-
- /*
- Process a set of code lengths to create a canonical Huffman code. The
- code lengths are lens[0..codes-1]. Each length corresponds to the
- symbols 0..codes-1. The Huffman code is generated by first sorting the
- symbols by length from short to long, and retaining the symbol order
- for codes with equal lengths. Then the code starts with all zero bits
- for the first code of the shortest length, and the codes are integer
- increments for the same length, and zeros are appended as the length
- increases. For the deflate format, these bits are stored backwards
- from their more natural integer increment ordering, and so when the
- decoding tables are built in the large loop below, the integer codes
- are incremented backwards.
-
- This routine assumes, but does not check, that all of the entries in
- lens[] are in the range 0..MAXBITS. The caller must assure this.
- 1..MAXBITS is interpreted as that code length. zero means that that
- symbol does not occur in this code.
-
- The codes are sorted by computing a count of codes for each length,
- creating from that a table of starting indices for each length in the
- sorted table, and then entering the symbols in order in the sorted
- table. The sorted table is work[], with that space being provided by
- the caller.
-
- The length counts are used for other purposes as well, i.e. finding
- the minimum and maximum length codes, determining if there are any
- codes at all, checking for a valid set of lengths, and looking ahead
- at length counts to determine sub-table sizes when building the
- decoding tables.
- */
-
- /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
- for (len = 0; len <= MAXBITS; len++)
- count[len] = 0;
- for (sym = 0; sym < codes; sym++)
- count[lens[sym]]++;
-
- /* bound code lengths, force root to be within code lengths */
- root = *bits;
- for (max = MAXBITS; max >= 1; max--)
- if (count[max] != 0) break;
- if (root > max) root = max;
- if (max == 0) { /* no symbols to code at all */
- this.op = (unsigned char)64; /* invalid code marker */
- this.bits = (unsigned char)1;
- this.val = (unsigned short)0;
- *(*table)++ = this; /* make a table to force an error */
- *(*table)++ = this;
- *bits = 1;
- return 0; /* no symbols, but wait for decoding to report error */
- }
- for (min = 1; min <= MAXBITS; min++)
- if (count[min] != 0) break;
- if (root < min) root = min;
-
- /* check for an over-subscribed or incomplete set of lengths */
- left = 1;
- for (len = 1; len <= MAXBITS; len++) {
- left <<= 1;
- left -= count[len];
- if (left < 0) return -1; /* over-subscribed */
- }
- if (left > 0 && (type == CODES || max != 1))
- return -1; /* incomplete set */
-
- /* generate offsets into symbol table for each length for sorting */
- offs[1] = 0;
- for (len = 1; len < MAXBITS; len++)
- offs[len + 1] = offs[len] + count[len];
-
- /* sort symbols by length, by symbol order within each length */
- for (sym = 0; sym < codes; sym++)
- if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym;
-
- /*
- Create and fill in decoding tables. In this loop, the table being
- filled is at next and has curr index bits. The code being used is huff
- with length len. That code is converted to an index by dropping drop
- bits off of the bottom. For codes where len is less than drop + curr,
- those top drop + curr - len bits are incremented through all values to
- fill the table with replicated entries.
-
- root is the number of index bits for the root table. When len exceeds
- root, sub-tables are created pointed to by the root entry with an index
- of the low root bits of huff. This is saved in low to check for when a
- new sub-table should be started. drop is zero when the root table is
- being filled, and drop is root when sub-tables are being filled.
-
- When a new sub-table is needed, it is necessary to look ahead in the
- code lengths to determine what size sub-table is needed. The length
- counts are used for this, and so count[] is decremented as codes are
- entered in the tables.
-
- used keeps track of how many table entries have been allocated from the
- provided *table space. It is checked when a LENS table is being made
- against the space in *table, ENOUGH, minus the maximum space needed by
- the worst case distance code, MAXD. This should never happen, but the
- sufficiency of ENOUGH has not been proven exhaustively, hence the check.
- This assumes that when type == LENS, bits == 9.
-
- sym increments through all symbols, and the loop terminates when
- all codes of length max, i.e. all codes, have been processed. This
- routine permits incomplete codes, so another loop after this one fills
- in the rest of the decoding tables with invalid code markers.
- */
-
- /* set up for code type */
- switch (type) {
- case CODES:
- base = extra = work; /* dummy value--not used */
- end = 19;
- break;
- case LENS:
- base = lbase;
- base -= 257;
- extra = lext;
- extra -= 257;
- end = 256;
- break;
- default: /* DISTS */
- base = dbase;
- extra = dext;
- end = -1;
- }
-
- /* initialize state for loop */
- huff = 0; /* starting code */
- sym = 0; /* starting code symbol */
- len = min; /* starting code length */
- next = *table; /* current table to fill in */
- curr = root; /* current table index bits */
- drop = 0; /* current bits to drop from code for index */
- low = (unsigned)(-1); /* trigger new sub-table when len > root */
- used = 1U << root; /* use root table entries */
- mask = used - 1; /* mask for comparing low */
-
- /* check available table space */
- if (type == LENS && used >= ENOUGH - MAXD)
- return 1;
-
- /* process all codes and make table entries */
- for (;;) {
- /* create table entry */
- this.bits = (unsigned char)(len - drop);
- if ((int)(work[sym]) < end) {
- this.op = (unsigned char)0;
- this.val = work[sym];
- }
- else if ((int)(work[sym]) > end) {
- this.op = (unsigned char)(extra[work[sym]]);
- this.val = base[work[sym]];
- }
- else {
- this.op = (unsigned char)(32 + 64); /* end of block */
- this.val = 0;
- }
-
- /* replicate for those indices with low len bits equal to huff */
- incr = 1U << (len - drop);
- fill = 1U << curr;
- min = fill; /* save offset to next table */
- do {
- fill -= incr;
- next[(huff >> drop) + fill] = this;
- } while (fill != 0);
-
- /* backwards increment the len-bit code huff */
- incr = 1U << (len - 1);
- while (huff & incr)
- incr >>= 1;
- if (incr != 0) {
- huff &= incr - 1;
- huff += incr;
- }
- else
- huff = 0;
-
- /* go to next symbol, update count, len */
- sym++;
- if (--(count[len]) == 0) {
- if (len == max) break;
- len = lens[work[sym]];
- }
-
- /* create new sub-table if needed */
- if (len > root && (huff & mask) != low) {
- /* if first time, transition to sub-tables */
- if (drop == 0)
- drop = root;
-
- /* increment past last table */
- next += min; /* here min is 1 << curr */
-
- /* determine length of next table */
- curr = len - drop;
- left = (int)(1 << curr);
- while (curr + drop < max) {
- left -= count[curr + drop];
- if (left <= 0) break;
- curr++;
- left <<= 1;
- }
-
- /* check for enough space */
- used += 1U << curr;
- if (type == LENS && used >= ENOUGH - MAXD)
- return 1;
-
- /* point entry in root table to sub-table */
- low = huff & mask;
- (*table)[low].op = (unsigned char)curr;
- (*table)[low].bits = (unsigned char)root;
- (*table)[low].val = (unsigned short)(next - *table);
- }
- }
-
- /*
- Fill in rest of table for incomplete codes. This loop is similar to the
- loop above in incrementing huff for table indices. It is assumed that
- len is equal to curr + drop, so there is no loop needed to increment
- through high index bits. When the current sub-table is filled, the loop
- drops back to the root table to fill in any remaining entries there.
- */
- this.op = (unsigned char)64; /* invalid code marker */
- this.bits = (unsigned char)(len - drop);
- this.val = (unsigned short)0;
- while (huff != 0) {
- /* when done with sub-table, drop back to root table */
- if (drop != 0 && (huff & mask) != low) {
- drop = 0;
- len = root;
- next = *table;
- this.bits = (unsigned char)len;
- }
-
- /* put invalid code marker in table */
- next[huff >> drop] = this;
-
- /* backwards increment the len-bit code huff */
- incr = 1U << (len - 1);
- while (huff & incr)
- incr >>= 1;
- if (incr != 0) {
- huff &= incr - 1;
- huff += incr;
- }
- else
- huff = 0;
- }
-
- /* set return parameters */
- *table += used;
- *bits = root;
- return 0;
-}
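The "backwards increment" appears twice in the function above; because deflate stores code bits LSB-first, advancing to the next code of the same length means incrementing from the top bit of the len-bit code downward. Isolated into a hedged helper (bitrev_incr is an invented name):

static unsigned bitrev_incr(unsigned huff, unsigned len)
{
    unsigned incr = 1U << (len - 1);

    while (huff & incr)
        incr >>= 1;
    if (incr != 0) {
        huff &= incr - 1;
        huff += incr;
    } else {
        huff = 0;               /* wrapped past the last code */
    }
    return huff;
}

/* For len == 3 this visits 000, 100, 010, 110, 001, 101, 011, 111, 000:
 * the bit-reversal of ordinary counting, which is why the tables can be
 * indexed directly by the low bits of the accumulator. */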
diff --git a/linux/zlib_inflate/inftrees.h b/linux/zlib_inflate/inftrees.h
deleted file mode 100644
index b70b473..0000000
--- a/linux/zlib_inflate/inftrees.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef INFTREES_H
-#define INFTREES_H
-
-/* inftrees.h -- header to use inftrees.c
- * Copyright (C) 1995-2005 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* Structure for decoding tables. Each entry provides either the
- information needed to do the operation requested by the code that
- indexed that table entry, or it provides a pointer to another
- table that indexes more bits of the code. op indicates whether
- the entry is a pointer to another table, a literal, a length or
- distance, an end-of-block, or an invalid code. For a table
- pointer, the low four bits of op is the number of index bits of
- that table. For a length or distance, the low four bits of op
- is the number of extra bits to get after the code. bits is
- the number of bits in this code or part of the code to drop off
- of the bit buffer. val is the actual byte to output in the case
- of a literal, the base length or distance, or the offset from
- the current table to the next table. Each entry is four bytes. */
-typedef struct {
- unsigned char op; /* operation, extra bits, table bits */
- unsigned char bits; /* bits in this part of the code */
- unsigned short val; /* offset in table or code value */
-} code;
-
-/* op values as set by inflate_table():
- 00000000 - literal
- 0000tttt - table link, tttt != 0 is the number of table index bits
- 0001eeee - length or distance, eeee is the number of extra bits
- 01100000 - end of block
- 01000000 - invalid code
- */
-
-/* Maximum size of dynamic tree. The maximum found in a long but non-
- exhaustive search was 1444 code structures (852 for length/literals
- and 592 for distances, the latter actually the result of an
- exhaustive search). The true maximum is not known, but the value
- below is more than safe. */
-#define ENOUGH 2048
-#define MAXD 592
-
- /* Type of code to build for zlib_inflate_table() */
-typedef enum {
- CODES,
- LENS,
- DISTS
-} codetype;
-
-extern int zlib_inflate_table (codetype type, unsigned short *lens,
- unsigned codes, code **table,
- unsigned *bits, unsigned short *work);
-#endif
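The op patterns listed above can be read off with a few mask tests, consistent with how the LEN and DIST cases test this.op (op & 0xf0, op & 32, op & 64). A sketch with invented names (entry_kind, classify):

enum entry_kind { E_LITERAL, E_LINK, E_BASE_EXTRA, E_END, E_INVALID };

static enum entry_kind classify(unsigned char op)
{
    if (op == 0)
        return E_LITERAL;       /* 00000000: val is the byte itself */
    if ((op & 0xf0) == 0)
        return E_LINK;          /* 0000tttt: tttt sub-table index bits */
    if ((op & 0xf0) == 0x10)
        return E_BASE_EXTRA;    /* 0001eeee: val is base, eeee extra bits */
    if (op == 0x60)
        return E_END;           /* 01100000: end of block */
    return E_INVALID;           /* 01000000 and anything else */
}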
diff --git a/linux/zlib_inflate/infutil.c b/linux/zlib_inflate/infutil.c
deleted file mode 100644
index 4824c2c..0000000
--- a/linux/zlib_inflate/infutil.c
+++ /dev/null
@@ -1,49 +0,0 @@
-#include <linux/zutil.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-/* Utility function: initialize zlib, unpack binary blob, clean up zlib,
- * return len or negative error code.
- */
-int zlib_inflate_blob(void *gunzip_buf, unsigned int sz,
- const void *buf, unsigned int len)
-{
- const u8 *zbuf = buf;
- struct z_stream_s *strm;
- int rc;
-
- rc = -ENOMEM;
- strm = kmalloc(sizeof(*strm), GFP_KERNEL);
- if (strm == NULL)
- goto gunzip_nomem1;
- strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
- if (strm->workspace == NULL)
- goto gunzip_nomem2;
-
- /* gzip header (1f,8b,08... 10 bytes total + possible asciz filename)
- * expected to be stripped from input
- */
- strm->next_in = zbuf;
- strm->avail_in = len;
- strm->next_out = gunzip_buf;
- strm->avail_out = sz;
-
- rc = zlib_inflateInit2(strm, -MAX_WBITS);
- if (rc == Z_OK) {
- rc = zlib_inflate(strm, Z_FINISH);
- /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
- if (rc == Z_STREAM_END)
- rc = sz - strm->avail_out;
- else
- rc = -EINVAL;
- zlib_inflateEnd(strm);
- } else
- rc = -EINVAL;
-
- kfree(strm->workspace);
-gunzip_nomem2:
- kfree(strm);
-gunzip_nomem1:
- return rc; /* decompressed length on success, negative errno on error */
-}
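For comparison, the same unpack-a-blob pattern written against stock userspace zlib, which this kernel-style shim stood in for. A sketch assuming a raw deflate payload (hence the negative windowBits) and <zlib.h>; inflate_blob is a local name:

#include <string.h>
#include <zlib.h>

static int inflate_blob(void *dst, unsigned dst_sz,
                        const void *src, unsigned src_len)
{
    z_stream strm;
    int rc;

    memset(&strm, 0, sizeof(strm));     /* zalloc/zfree/opaque = Z_NULL */
    if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)
        return -1;

    strm.next_in = (unsigned char *)src;
    strm.avail_in = src_len;
    strm.next_out = dst;
    strm.avail_out = dst_sz;

    rc = inflate(&strm, Z_FINISH);
    inflateEnd(&strm);

    /* Only Z_STREAM_END means the whole stream fit and was consumed */
    return rc == Z_STREAM_END ? (int)(dst_sz - strm.avail_out) : -1;
}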
diff --git a/linux/zlib_inflate/infutil.h b/linux/zlib_inflate/infutil.h
deleted file mode 100644
index eb1a900..0000000
--- a/linux/zlib_inflate/infutil.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* infutil.h -- types and macros common to blocks and codes
- * Copyright (C) 1995-1998 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-#ifndef _INFUTIL_H
-#define _INFUTIL_H
-
-#include <linux/zlib.h>
-
-/* memory allocation for inflation */
-
-struct inflate_workspace {
- struct inflate_state inflate_state;
- unsigned char working_window[1 << MAX_WBITS];
-};
-
-#define WS(z) ((struct inflate_workspace *)(z->workspace))
-
-#endif
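Finally, a sketch of how the workspace was meant to be sized and attached, assuming the two headers above; workspace_size and setup_stream are invented names, and in the kernel API the published size query, zlib_inflate_workspacesize() (used by zlib_inflate_blob above), reduces to the same sizeof:

#include <stdlib.h>
#include "inflate.h"            /* struct inflate_state */
#include "infutil.h"            /* struct inflate_workspace, WS() */

/* One allocation carries both the decoder state and the 32K
 * (1 << MAX_WBITS) sliding window. */
static int workspace_size(void)
{
    return sizeof(struct inflate_workspace);
}

static int setup_stream(z_streamp strm)
{
    strm->workspace = malloc((size_t)workspace_size());
    if (strm->workspace == NULL)
        return -1;
    /* Internally, WS() recovers the state from that allocation:
     *   struct inflate_state *state = &WS(strm)->inflate_state;
     */
    return 0;
}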