/* inffast.c -- fast decoding
 * Copyright (C) 1995-2017 Mark Adler
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "zbuild.h"
#include "zutil.h"
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
#include "inflate_p.h"
#include "functable.h"

/* Load 64 bits from IN and place the bytes at offset BITS in the result. */
static inline uint64_t load_64_bits(const unsigned char *in, unsigned bits) {
    uint64_t chunk;
    memcpy(&chunk, in, sizeof(chunk));

#if BYTE_ORDER == LITTLE_ENDIAN
    return chunk << bits;
#else
    return ZSWAP64(chunk) << bits;
#endif
}
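
/* Illustrative sketch, not part of the upstream source and not called by
   zng_inflate_fast(): the name refill48_sketch is invented here purely to show
   the bit-refill pattern that the decode loop below repeats inline. It assumes
   at least INFLATE_FAST_MIN_HAVE (8) readable bytes at *in: it loads 8 bytes
   via load_64_bits(), but conservatively advances only 6 bytes and credits
   only 48 bits, since up to 2 bytes' worth of the loaded bits can be shifted
   out (see the "hold" comment inside zng_inflate_fast below). */
static inline void refill48_sketch(const unsigned char **in, uint64_t *hold, unsigned *bits) {
    if (*bits < 15) {                       /* not enough bits for a 15-bit root code */
        *hold |= load_64_bits(*in, *bits);  /* OR the next 8 bytes above the live bits */
        *in += 6;                           /* consume only 6 of the 8 loaded bytes */
        *bits += 48;                        /* and credit the 48 bits they contribute */
    }
}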

/*
   Decode literal, length, and distance codes and write out the resulting
   literal and match bytes until either not enough input or output is
   available, an end-of-block is encountered, or a data error is encountered.
   When large enough input and output buffers are supplied to inflate(), for
   example, a 16K input buffer and a 64K output buffer, more than 95% of the
   inflate execution time is spent in this routine.

   Entry assumptions:

        state->mode == LEN
        strm->avail_in >= INFLATE_FAST_MIN_HAVE
        strm->avail_out >= INFLATE_FAST_MIN_LEFT
        start >= strm->avail_out
        state->bits < 8

   On return, state->mode is one of:

        LEN -- ran out of output space or available input
        TYPE -- reached end of block code, inflate() to interpret next block
        BAD -- error in block data

   Notes:

    - The maximum input bits used by a length/distance pair is 15 bits for the
      length code, 5 bits for the length extra, 15 bits for the distance code,
      and 13 bits for the distance extra. This totals 48 bits, or six bytes.
      Therefore if strm->avail_in >= 6, then there is enough input to avoid
      checking for available input while decoding.

    - On some architectures, it can be significantly faster (e.g. up to 1.2x
      faster on x86_64) to load 64 bits, or 8 bytes, from strm->next_in at a
      time, so INFLATE_FAST_MIN_HAVE == 8.

    - The maximum bytes that a single length/distance pair can output is 258
      bytes, which is the maximum length that can be coded. inflate_fast()
      requires strm->avail_out >= 258 for each loop to avoid checking for
      output space.
 */
void Z_INTERNAL zng_inflate_fast(PREFIX3(stream) *strm, unsigned long start) {
    /* start: inflate()'s starting value for strm->avail_out */
    struct inflate_state *state;
    z_const unsigned char *in;  /* local strm->next_in */
    const unsigned char *last;  /* have enough input while in < last */
    unsigned char *out;         /* local strm->next_out */
    unsigned char *beg;         /* inflate()'s initial strm->next_out */
    unsigned char *end;         /* while out < end, enough space available */
    unsigned char *safe;        /* can use chunkcopy provided out < safe */
#ifdef INFLATE_STRICT
    unsigned dmax;              /* maximum distance from zlib header */
#endif
    unsigned wsize;             /* window size or zero if not using window */
    unsigned whave;             /* valid bytes in the window */
    unsigned wnext;             /* window write index */
    unsigned char *window;      /* allocated sliding window, if wsize != 0 */

    /* hold is a local copy of strm->hold. By default, hold satisfies the same
       invariants that strm->hold does, namely that (hold >> bits) == 0. This
       invariant is kept by loading bits into hold one byte at a time, like:

       hold |= next_byte_of_input << bits; in++; bits += 8;

       If we need to ensure that bits >= 15 then this code snippet is simply
       repeated. Over one iteration of the outermost do/while loop, this
       happens up to six times (48 bits of input), as described in the NOTES
       above.

       However, on some little endian architectures, it can be significantly
       faster to load 64 bits once instead of 8 bits six times:

       if (bits <= 16) {
         hold |= next_8_bytes_of_input << bits; in += 6; bits += 48;
       }

       Unlike the simpler one byte load, shifting the next_8_bytes_of_input
       by bits will overflow and lose those high bits, up to 2 bytes' worth.
       The conservative estimate is therefore that we have read only 6 bytes
       (48 bits). Again, as per the NOTES above, 48 bits is sufficient for the
       rest of the iteration, and we will not need to load another 8 bytes.

       Inside this function, we no longer satisfy (hold >> bits) == 0, but
       this is not problematic, even if that overflow does not land on an 8 bit
       byte boundary. Those excess bits will eventually shift down lower as the
       Huffman decoder consumes input, and when new input bits need to be loaded
       into the bits variable, the same input bits will be or'ed over those
       existing bits. A bitwise or is idempotent: (a | b | b) equals (a | b).
       Note that we therefore write that load operation as "hold |= etc" and
       not "hold += etc".

       Outside that loop, at the end of the function, hold is bitwise and'ed
       with (1<<bits)-1 to drop those excess bits so that, on function exit, we
       keep the invariant that (state->hold >> state->bits) == 0.
     */
    uint64_t hold;              /* local strm->hold */
    unsigned bits;              /* local strm->bits */
    code const *lcode;          /* local strm->lencode */
    code const *dcode;          /* local strm->distcode */
    unsigned lmask;             /* mask for first level of length codes */
    unsigned dmask;             /* mask for first level of distance codes */
    const code *here;           /* retrieved table entry */
    unsigned op;                /* code bits, operation, extra bits, or */
                                /* window position, window bytes to copy */
    unsigned len;               /* match length, unused bytes */
    unsigned dist;              /* match distance */
    unsigned char *from;        /* where to copy match from */

    /* copy state to local variables */
    state = (struct inflate_state *)strm->state;
    in = strm->next_in;
    last = in + (strm->avail_in - (INFLATE_FAST_MIN_HAVE - 1));
    out = strm->next_out;
    beg = out - (start - strm->avail_out);
    end = out + (strm->avail_out - (INFLATE_FAST_MIN_LEFT - 1));
    safe = out + strm->avail_out;
#ifdef INFLATE_STRICT
    dmax = state->dmax;
#endif
    wsize = state->wsize;
    whave = state->whave;
    wnext = state->wnext;
    window = state->window;
    hold = state->hold;
    bits = state->bits;
    lcode = state->lencode;
    dcode = state->distcode;
    lmask = (1U << state->lenbits) - 1;
    dmask = (1U << state->distbits) - 1;

    /* decode literals and length/distances until end-of-block or not enough
       input data or output space */
    do {
        if (bits < 15) {
            hold |= load_64_bits(in, bits);
            in += 6;
            bits += 48;
        }
        here = lcode + (hold & lmask);
      dolen:
        DROPBITS(here->bits);
        op = here->op;
        if (op == 0) {                          /* literal */
            Tracevv((stderr, here->val >= 0x20 && here->val < 0x7f ?
                    "inflate: literal '%c'\n" :
                    "inflate: literal 0x%02x\n", here->val));
            *out++ = (unsigned char)(here->val);
        } else if (op & 16) {                   /* length base */
            len = here->val;
            op &= 15;                           /* number of extra bits */
            if (bits < op) {
                hold |= load_64_bits(in, bits);
                in += 6;
                bits += 48;
            }
            len += BITS(op);
            DROPBITS(op);
            Tracevv((stderr, "inflate: length %u\n", len));
            if (bits < 15) {
                hold |= load_64_bits(in, bits);
                in += 6;
                bits += 48;
            }
            here = dcode + (hold & dmask);
          dodist:
            DROPBITS(here->bits);
            op = here->op;
            if (op & 16) {                      /* distance base */
                dist = here->val;
                op &= 15;                       /* number of extra bits */
                if (bits < op) {
                    hold |= load_64_bits(in, bits);
                    in += 6;
                    bits += 48;
                }
                dist += BITS(op);
#ifdef INFLATE_STRICT
                if (dist > dmax) {
                    SET_BAD("invalid distance too far back");
                    break;
                }
#endif
                DROPBITS(op);
                Tracevv((stderr, "inflate: distance %u\n", dist));
                op = (unsigned)(out - beg);     /* max distance in output */
                if (dist > op) {                /* see if copy from window */
                    op = dist - op;             /* distance back in window */
                    if (op > whave) {
                        if (state->sane) {
                            SET_BAD("invalid distance too far back");
                            break;
                        }
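                        /* When INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR is
                           defined and state->sane is zero, a distance reaching
                           back past the valid history is tolerated: the bytes
                           that would come from the missing history are written
                           as zeros, and any remainder of the match that does
                           lie in real data is copied normally. */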
#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
                        if (len <= op - whave) {
                            do {
                                *out++ = 0;
                            } while (--len);
                            continue;
                        }
                        len -= op - whave;
                        do {
                            *out++ = 0;
                        } while (--op > whave);
                        if (op == 0) {
                            from = out - dist;
                            do {
                                *out++ = *from++;
                            } while (--len);
                            continue;
                        }
#endif
                    }
                    from = window;
                    if (wnext == 0) {           /* very common case */
                        from += wsize - op;
                    } else if (wnext >= op) {   /* contiguous in window */
                        from += wnext - op;
                    } else {                    /* wrap around window */
                        op -= wnext;
                        from += wsize - op;
                        if (op < len) {         /* some from end of window */
                            len -= op;
                            out = functable.chunkcopy_safe(out, from, op, safe);
                            from = window;      /* more from start of window */
                            op = wnext;
                            /* This (rare) case can create a situation where
                               the first chunkcopy below must be checked. */
                        }
                    }
                    if (op < len) {             /* still need some from output */
                        len -= op;
                        out = functable.chunkcopy_safe(out, from, op, safe);
                        out = functable.chunkunroll(out, &dist, &len);
                        out = functable.chunkcopy_safe(out, out - dist, len, safe);
                    } else {
                        out = functable.chunkcopy_safe(out, from, len, safe);
                    }
                } else {
                    /* Whole reference is in range of current output. No range
                       checks are necessary because we start with room for at
                       least 258 bytes of output, so unroll and roundoff
                       operations can write beyond `out+len` so long as they
                       stay within 258 bytes of `out`. */
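                    /* chunkcopy copies a full chunk at a time, so it is only
                       safe when the match does not overlap itself within one
                       chunk (dist >= len means no overlap at all, dist >=
                       chunksize keeps the overlap outside a chunk); otherwise
                       the match is a short repeating pattern of period dist,
                       which chunkmemset expands. */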
                    if (dist >= len || dist >= state->chunksize)
                        out = functable.chunkcopy(out, out - dist, len);
                    else
                        out = functable.chunkmemset(out, dist, len);
                }
            } else if ((op & 64) == 0) {        /* 2nd level distance code */
                here = dcode + here->val + BITS(op);
                goto dodist;
            } else {
                SET_BAD("invalid distance code");
                break;
            }
        } else if ((op & 64) == 0) {            /* 2nd level length code */
            here = lcode + here->val + BITS(op);
            goto dolen;
        } else if (op & 32) {                   /* end-of-block */
            Tracevv((stderr, "inflate: end of block\n"));
            state->mode = TYPE;
            break;
        } else {
            SET_BAD("invalid literal/length code");
            break;
        }
    } while (in < last && out < end);

    /* return unused bytes (on entry, bits < 8, so in won't go too far back) */
    len = bits >> 3;
    in -= len;
    bits -= len << 3;
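    /* drop the excess bits that the 64-bit loads OR'ed in at positions >= bits,
       restoring the invariant that (state->hold >> state->bits) == 0 on exit
       (see the "hold" comment above) */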
    hold &= (UINT64_C(1) << bits) - 1;

    /* update state and return */
    strm->next_in = in;
    strm->next_out = out;
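    /* recompute the available counts from the sentinels set up on entry:
       last = next_in + (avail_in - (INFLATE_FAST_MIN_HAVE - 1)) and
       end = next_out + (avail_out - (INFLATE_FAST_MIN_LEFT - 1)); the two
       branches only keep the pointer subtraction non-negative */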
    strm->avail_in = (unsigned)(in < last ? (INFLATE_FAST_MIN_HAVE - 1) + (last - in)
                                          : (INFLATE_FAST_MIN_HAVE - 1) - (in - last));
    strm->avail_out = (unsigned)(out < end ? (INFLATE_FAST_MIN_LEFT - 1) + (end - out)
                                           : (INFLATE_FAST_MIN_LEFT - 1) - (out - end));
    Assert(bits <= 32, "Remaining bits greater than 32");
    state->hold = (uint32_t)hold;
    state->bits = bits;
    return;
}

/*
   inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe):
   - Using bit fields for code structure
   - Different op definition to avoid & for extra bits (do & for table bits)
   - Three separate decoding do-loops for direct, window, and wnext == 0
   - Special case for distance > 1 copies to do overlapped load and store copy
   - Explicit branch predictions (based on measured branch probabilities)
   - Deferring match copy and interspersing it with decoding subsequent codes
   - Swapping literal/length else
   - Swapping window/direct else
   - Larger unrolled copy loops (three is about right)
   - Moving len -= 3 statement into middle of loop
 */