Line data Source code
1 : // Copyright 2005 Google Inc. All Rights Reserved.
2 : //
3 : // Redistribution and use in source and binary forms, with or without
4 : // modification, are permitted provided that the following conditions are
5 : // met:
6 : //
7 : // * Redistributions of source code must retain the above copyright
8 : // notice, this list of conditions and the following disclaimer.
9 : // * Redistributions in binary form must reproduce the above
10 : // copyright notice, this list of conditions and the following disclaimer
11 : // in the documentation and/or other materials provided with the
12 : // distribution.
13 : // * Neither the name of Google Inc. nor the names of its
14 : // contributors may be used to endorse or promote products derived from
15 : // this software without specific prior written permission.
16 : //
17 : // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 : // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 : // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 : // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 : // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 : // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 : // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 : // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 : // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 : // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 : // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 :
29 : #include "snappy.h"
30 : #include "snappy-internal.h"
31 : #include "snappy-sinksource.h"
32 :
33 : #include <stdio.h>
34 :
35 : #include <algorithm>
36 : #include <string>
37 : #include <vector>
38 :
39 :
40 : namespace snappy {
41 :
42 : using internal::COPY_1_BYTE_OFFSET;
43 : using internal::COPY_2_BYTE_OFFSET;
44 : using internal::COPY_4_BYTE_OFFSET;
45 : using internal::LITERAL;
46 : using internal::char_table;
47 : using internal::kMaximumTagLength;
48 : using internal::wordmask;
49 :
50 : // Any hash function will produce a valid compressed bitstream, but a good
51 : // hash function reduces the number of collisions and thus yields better
52 : // compression for compressible input, and more speed for incompressible
53 : // input. Of course, it doesn't hurt if the hash function is reasonably fast
54 : // either, as it gets called a lot.
55 0 : static inline uint32 HashBytes(uint32 bytes, int shift) {
56 0 : uint32 kMul = 0x1e35a7bd;
57 0 : return (bytes * kMul) >> shift;
58 : }
59 0 : static inline uint32 Hash(const char* p, int shift) {
60 0 : return HashBytes(UNALIGNED_LOAD32(p), shift);
61 : }
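
// Worked example (a sketch, not part of the original file; "p" is any
// readable input pointer): with the smallest 256-entry hash table,
// shift == 32 - Log2Floor(256) == 24, so HashBytes() keeps only the top
// 8 bits of the 32-bit product and the result always indexes within the
// table:
//
//   int shift = 32 - Bits::Log2Floor(256);   // == 24
//   uint32 bucket = Hash(p, shift);          // 0 <= bucket < 256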
62 :
63 0 : size_t MaxCompressedLength(size_t source_len) {
64 : // Compressed data can be defined as:
65 : // compressed := item* literal*
66 : // item := literal* copy
67 : //
68 : // The trailing literal sequence has a space blowup of at most 62/60
69 : // since a literal of length 61 or more needs one tag byte plus one or
70 : // more extra length bytes (e.g. 61 bytes become 63, and 63/61 < 62/60).
71 : //
72 : // Item blowup is trickier to measure. Suppose the "copy" op copies
73 : // 4 bytes of data. Because of a special check in the encoding code,
74 : // we produce a 4-byte copy only if the offset is < 65536. Therefore
75 : // the copy op takes 3 bytes to encode, and this type of item leads
76 : // to at most the 62/60 blowup for representing literals.
77 : //
78 : // Suppose the "copy" op copies 5 bytes of data. If the offset is big
79 : // enough, it will take 5 bytes to encode the copy op. Therefore the
80 : // worst case here is a one-byte literal followed by a five-byte copy.
81 : // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
82 : //
83 : // This last factor dominates the blowup, so the final estimate is:
84 0 : return 32 + source_len + source_len/6;
85 : }
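
// Illustrative sanity checks for the bound above; a sketch only, not part
// of the library API. The slack term source_len/6 covers the worst case of
// a one-byte literal followed by a five-byte copy.
static inline void MaxCompressedLengthExamples() {
  assert(MaxCompressedLength(0) == 32);             // fixed slack only
  assert(MaxCompressedLength(60) == 32 + 60 + 10);  // 60/6 == 10
  assert(MaxCompressedLength(6) >= 7);              // 6 bytes in, up to 7 out
}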
86 :
87 : // Copy "len" bytes from "src" to "op", one byte at a time. Used for
88 : // handling COPY operations where the input and output regions may
89 : // overlap. For example, suppose:
90 : // src == "ab"
91 : // op == src + 2
92 : // len == 20
93 : // After IncrementalCopy(src, op, len), the result will have
94 : // eleven copies of "ab"
95 : // ababababababababababab
96 : // Note that this does not match the semantics of either memcpy()
97 : // or memmove().
98 0 : static inline void IncrementalCopy(const char* src, char* op, ssize_t len) {
99 0 : assert(len > 0);
100 0 : do {
101 0 : *op++ = *src++;
102 : } while (--len > 0);
103 0 : }
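
// Usage sketch of the overlapping-copy semantics, mirroring the example in
// the comment above:
//
//   char buf[32] = "ab";                 // zero-filled after "ab"
//   IncrementalCopy(buf, buf + 2, 20);   // buf now starts with 11 x "ab"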
104 :
105 : // Equivalent to IncrementalCopy except that it can write up to ten extra
106 : // bytes after the end of the copy, and that it is faster.
107 : //
108 : // The main part of this loop is a simple copy of eight bytes at a time until
109 : // we've copied (at least) the requested number of bytes. However, if op and
110 : // src are less than eight bytes apart (indicating a repeating pattern of
111 : // length < 8), we first need to expand the pattern in order to get the correct
112 : // results. For instance, if the buffer looks like this, with the eight-byte
113 : // <src> and <op> patterns marked as intervals:
114 : //
115 : // abxxxxxxxxxxxx
116 : // [------] src
117 : // [------] op
118 : //
119 : // a single eight-byte copy from <src> to <op> will repeat the pattern once,
120 : // after which we can move <op> two bytes without moving <src>:
121 : //
122 : // ababxxxxxxxxxx
123 : // [------] src
124 : // [------] op
125 : //
126 : // and repeat the exercise until the two no longer overlap.
127 : //
128 : // This allows us to do very well in the special case of one single byte
129 : // repeated many times, without taking a big hit for more general cases.
130 : //
131 : // The worst case of extra writing past the end of the match occurs when
132 : // op - src == 1 and len == 1; the last copy will read from byte positions
133 : // [0..7] and write to [4..11], whereas it was only supposed to write to
134 : // position 1. Thus, ten excess bytes.
135 :
136 : namespace {
137 :
138 : const int kMaxIncrementCopyOverflow = 10;
139 :
140 0 : inline void IncrementalCopyFastPath(const char* src, char* op, ssize_t len) {
141 0 : while (PREDICT_FALSE(op - src < 8)) {
142 0 : UnalignedCopy64(src, op);
143 0 : len -= op - src;
144 0 : op += op - src;
145 : }
146 0 : while (len > 0) {
147 0 : UnalignedCopy64(src, op);
148 0 : src += 8;
149 0 : op += 8;
150 0 : len -= 8;
151 : }
152 0 : }
153 :
154 : } // namespace
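
// Callers must therefore leave kMaxIncrementCopyOverflow bytes of slack
// after the destination, since the loop above writes whole 8-byte words.
// Worst-case sketch, matching the [4..11] example in the comment above:
//
//   char buf[12];                              // 2 live bytes + 10 slack
//   buf[0] = 'x';
//   IncrementalCopyFastPath(buf, buf + 1, 1);  // may write buf[1..11]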
155 :
156 0 : static inline char* EmitLiteral(char* op,
157 : const char* literal,
158 : int len,
159 : bool allow_fast_path) {
160 0 : int n = len - 1; // Zero-length literals are disallowed
161 0 : if (n < 60) {
162 : // Fits in tag byte
163 0 : *op++ = LITERAL | (n << 2);
164 :
165 : // The vast majority of copies are below 16 bytes, for which a
166 : // call to memcpy is overkill. This fast path can sometimes
167 : // copy up to 15 bytes too much, but that is okay in the
168 : // main loop, since we have a bit to go on for both sides:
169 : //
170 : // - The input will always have kInputMarginBytes = 15 extra
171 : // available bytes whenever we're in the main loop; outside the
172 : // main loop, allow_fast_path is false.
173 : // - The output will always have 32 spare bytes (see
174 : // MaxCompressedLength).
175 0 : if (allow_fast_path && len <= 16) {
176 0 : UnalignedCopy64(literal, op);
177 0 : UnalignedCopy64(literal + 8, op + 8);
178 0 : return op + len;
179 : }
180 : } else {
181 : // Encode in upcoming bytes
182 0 : char* base = op;
183 0 : int count = 0;
184 0 : op++;
185 0 : while (n > 0) {
186 0 : *op++ = n & 0xff;
187 0 : n >>= 8;
188 0 : count++;
189 : }
190 0 : assert(count >= 1);
191 0 : assert(count <= 4);
192 0 : *base = LITERAL | ((59+count) << 2);
193 : }
194 0 : memcpy(op, literal, len);
195 0 : return op + len;
196 : }
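
// Encoding sketch: the low two bits of a tag byte hold the element type
// (00 == LITERAL); for len <= 60 the upper six bits hold len - 1, and
// longer literals store 60..63 there plus 1..4 little-endian length bytes.
// For example, the 5-byte literal "hello" becomes:
//
//   0x10 'h' 'e' 'l' 'l' 'o'    // tag == LITERAL | ((5 - 1) << 2) == 0x10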
197 :
198 0 : static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
199 0 : assert(len <= 64);
200 0 : assert(len >= 4);
201 0 : assert(offset < 65536);
202 :
203 0 : if ((len < 12) && (offset < 2048)) {
204 0 : size_t len_minus_4 = len - 4;
205 0 : assert(len_minus_4 < 8); // Must fit in 3 bits
206 0 : *op++ = COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8) << 5);
207 0 : *op++ = offset & 0xff;
208 : } else {
209 0 : *op++ = COPY_2_BYTE_OFFSET + ((len-1) << 2);
210 0 : LittleEndian::Store16(op, offset);
211 0 : op += 2;
212 : }
213 0 : return op;
214 : }
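
// Worked example: a copy of length 7 at offset 300 takes the 1-byte-offset
// form, since len < 12 and offset < 2048:
//
//   tag         == COPY_1_BYTE_OFFSET + ((7 - 4) << 2) + ((300 >> 8) << 5)
//               == 0x01 + 0x0c + 0x20 == 0x2d
//   offset byte == 300 & 0xff == 0x2c
//
// so the whole copy is the two bytes 0x2d 0x2c.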
215 :
216 0 : static inline char* EmitCopy(char* op, size_t offset, int len) {
217 : // Emit 64-byte copies, but make sure to keep at least four bytes reserved
218 0 : while (PREDICT_FALSE(len >= 68)) {
219 0 : op = EmitCopyLessThan64(op, offset, 64);
220 0 : len -= 64;
221 : }
222 :
223 : // Emit an extra 60-byte copy if we have too much data to fit in one copy
224 0 : if (len > 64) {
225 0 : op = EmitCopyLessThan64(op, offset, 60);
226 0 : len -= 60;
227 : }
228 :
229 : // Emit remainder
230 0 : op = EmitCopyLessThan64(op, offset, len);
231 0 : return op;
232 : }
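
// Why 68 and 60: every fragment emitted by EmitCopyLessThan64 must have
// length 4..64. Looping while len >= 68 guarantees at least 4 bytes remain
// after each 64-byte piece; a leftover in 65..67 is split as 60 + 5..7
// instead, again keeping the remainder >= 4. For instance:
//
//   len == 70: 64 + 6
//   len == 67: 60 + 7    (64 + 3 would leave an illegal 3-byte copy)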
233 :
234 :
235 0 : bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
236 0 : uint32 v = 0;
237 0 : const char* limit = start + n;
238 0 : if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
239 0 : *result = v;
240 0 : return true;
241 : } else {
242 0 : return false;
243 : }
244 : }
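
// The length preamble is a little-endian base-128 varint. For example, an
// uncompressed length of 2097150 (0x1FFFFE) is stored as the three bytes
// 0xFE 0xFF 0x7F in front of the compressed stream.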
245 :
246 : namespace internal {
247 0 : uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
248 : // Use a smaller hash table when input.size() is smaller, since we
249 : // fill the table, incurring O(hash table size) overhead for
250 : // compression, and if the input is short, we won't need that
251 : // many hash table entries anyway.
252 : assert(kMaxHashTableSize >= 256);
253 0 : size_t htsize = 256;
254 0 : while (htsize < kMaxHashTableSize && htsize < input_size) {
255 0 : htsize <<= 1;
256 : }
257 :
258 : uint16* table;
259 0 : if (htsize <= ARRAYSIZE(small_table_)) {
260 0 : table = small_table_;
261 : } else {
262 0 : if (large_table_ == NULL) {
263 0 : large_table_ = new uint16[kMaxHashTableSize];
264 : }
265 0 : table = large_table_;
266 : }
267 :
268 0 : *table_size = htsize;
269 0 : memset(table, 0, htsize * sizeof(*table));
270 0 : return table;
271 : }
272 : } // end namespace internal
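
// Sizing sketch for GetHashTable() above: htsize is the smallest power of
// two that is >= 256 and covers the input, capped at kMaxHashTableSize.
// E.g. a 5000-byte input gets an 8192-entry table (assuming
// kMaxHashTableSize permits), while a 100-byte input still gets the
// 256-entry minimum.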
273 :
274 : // For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
275 : // equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
276 : // empirically found that overlapping loads such as
277 : // UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
278 : // are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
279 : //
280 : // We have different versions for 64- and 32-bit; ideally we would avoid the
281 : // two functions and just inline the UNALIGNED_LOAD64 call into
282 : // GetUint32AtOffset, but GCC (at least as of 4.6) is seemingly not clever
283 : // enough to avoid reloading the value in that case. For 64-bit, the load
284 : // is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
285 : // done at GetUint32AtOffset() time.
286 :
287 : #ifdef ARCH_K8
288 :
289 : typedef uint64 EightBytesReference;
290 :
291 0 : static inline EightBytesReference GetEightBytesAt(const char* ptr) {
292 0 : return UNALIGNED_LOAD64(ptr);
293 : }
294 :
295 0 : static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
296 0 : assert(offset >= 0);
297 0 : assert(offset <= 4);
298 0 : return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
299 : }
300 :
301 : #else
302 :
303 : typedef const char* EightBytesReference;
304 :
305 : static inline EightBytesReference GetEightBytesAt(const char* ptr) {
306 : return ptr;
307 : }
308 :
309 : static inline uint32 GetUint32AtOffset(const char* v, int offset) {
310 : assert(offset >= 0);
311 : assert(offset <= 4);
312 : return UNALIGNED_LOAD32(v + offset);
313 : }
314 :
315 : #endif
316 :
317 : // Flat array compression that does not emit the "uncompressed length"
318 : // prefix. Compresses "input" string to the "*op" buffer.
319 : //
320 : // REQUIRES: "input" is at most "kBlockSize" bytes long.
321 : // REQUIRES: "op" points to an array of memory that is at least
322 : // "MaxCompressedLength(input.size())" in size.
323 : // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
324 : // REQUIRES: "table_size" is a power of two
325 : //
326 : // Returns an "end" pointer into "op" buffer.
327 : // "end - op" is the compressed size of "input".
328 : namespace internal {
329 0 : char* CompressFragment(const char* input,
330 : size_t input_size,
331 : char* op,
332 : uint16* table,
333 : const int table_size) {
334 : // "ip" is the input pointer, and "op" is the output pointer.
335 0 : const char* ip = input;
336 0 : assert(input_size <= kBlockSize);
337 0 : assert((table_size & (table_size - 1)) == 0); // table must be power of two
338 0 : const int shift = 32 - Bits::Log2Floor(table_size);
339 0 : assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
340 0 : const char* ip_end = input + input_size;
341 0 : const char* base_ip = ip;
342 : // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
343 : // [next_emit, ip_end) after the main loop.
344 0 : const char* next_emit = ip;
345 :
346 0 : const size_t kInputMarginBytes = 15;
347 0 : if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
348 0 : const char* ip_limit = input + input_size - kInputMarginBytes;
349 :
350 0 : for (uint32 next_hash = Hash(++ip, shift); ; ) {
351 0 : assert(next_emit < ip);
352 : // The body of this loop calls EmitLiteral once and then EmitCopy one or
353 : // more times. (The exception is that when we're close to exhausting
354 : // the input we goto emit_remainder.)
355 : //
356 : // In the first iteration of this loop we're just starting, so
357 : // there's nothing to copy, so calling EmitLiteral once is
358 : // necessary. And we only start a new iteration when the
359 : // current iteration has determined that a call to EmitLiteral will
360 : // precede the next call to EmitCopy (if any).
361 : //
362 : // Step 1: Scan forward in the input looking for a 4-byte-long match.
363 : // If we get close to exhausting the input then goto emit_remainder.
364 : //
365 : // Heuristic match skipping: If 32 bytes are scanned with no matches
366 : // found, start looking only at every other byte. If 32 more bytes are
367 : // scanned (or skipped), look at every third byte, etc. When a match is
368 : // found, immediately go back to looking at every byte. This is a small
369 : // loss (~5% performance, ~0.1% density) for compressible data due to more
370 : // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
371 : // win since the compressor quickly "realizes" the data is incompressible
372 : // and doesn't bother looking for matches everywhere.
373 : //
374 : // The "skip" variable keeps track of how many bytes there are since the
375 : // last match; dividing it by 32 (i.e. right-shifting by five) gives the
376 : // number of bytes to move ahead for each iteration.
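//
// Concretely: skip starts at 32, so bytes_between_hash_lookups (skip >> 5)
// is 1 for the first 32 probes, 2 for the next 16 probes, 3 for the next
// 11, and so on; a match resets the schedule because the enclosing loop
// re-initializes skip to 32.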
377 0 : uint32 skip = 32;
378 :
379 0 : const char* next_ip = ip;
380 : const char* candidate;
381 0 : do {
382 0 : ip = next_ip;
383 0 : uint32 hash = next_hash;
384 0 : assert(hash == Hash(ip, shift));
385 0 : uint32 bytes_between_hash_lookups = skip >> 5;
386 0 : skip += bytes_between_hash_lookups;
387 0 : next_ip = ip + bytes_between_hash_lookups;
388 0 : if (PREDICT_FALSE(next_ip > ip_limit)) {
389 0 : goto emit_remainder;
390 : }
391 0 : next_hash = Hash(next_ip, shift);
392 0 : candidate = base_ip + table[hash];
393 0 : assert(candidate >= base_ip);
394 0 : assert(candidate < ip);
395 :
396 0 : table[hash] = ip - base_ip;
397 0 : } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
398 : UNALIGNED_LOAD32(candidate)));
399 :
400 : // Step 2: A 4-byte match has been found. We'll later see if more
401 : // than 4 bytes match. But, prior to the match, input
402 : // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
403 0 : assert(next_emit + 16 <= ip_end);
404 0 : op = EmitLiteral(op, next_emit, ip - next_emit, true);
405 :
406 : // Step 3: Call EmitCopy, and then see if another EmitCopy could
407 : // be our next move. Repeat until we find no match for the
408 : // input immediately after what was consumed by the last EmitCopy call.
409 : //
410 : // If we exit this loop normally then we need to call EmitLiteral next,
411 : // though we don't yet know how big the literal will be. We handle that
412 : // by proceeding to the next iteration of the main loop. We also can exit
413 : // this loop via goto if we get close to exhausting the input.
414 : EightBytesReference input_bytes;
415 0 : uint32 candidate_bytes = 0;
416 :
417 0 : do {
418 : // We have a 4-byte match at ip, and no need to emit any
419 : // "literal bytes" prior to ip.
420 0 : const char* base = ip;
421 0 : int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
422 0 : ip += matched;
423 0 : size_t offset = base - candidate;
424 0 : assert(0 == memcmp(base, candidate, matched));
425 0 : op = EmitCopy(op, offset, matched);
426 : // We could immediately start working at ip now, but to improve
427 : // compression we first update table[Hash(ip - 1, ...)].
428 0 : const char* insert_tail = ip - 1;
429 0 : next_emit = ip;
430 0 : if (PREDICT_FALSE(ip >= ip_limit)) {
431 0 : goto emit_remainder;
432 : }
433 0 : input_bytes = GetEightBytesAt(insert_tail);
434 0 : uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
435 0 : table[prev_hash] = ip - base_ip - 1;
436 0 : uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
437 0 : candidate = base_ip + table[cur_hash];
438 0 : candidate_bytes = UNALIGNED_LOAD32(candidate);
439 0 : table[cur_hash] = ip - base_ip;
440 0 : } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
441 :
442 0 : next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
443 0 : ++ip;
444 0 : }
445 : }
446 :
447 : emit_remainder:
448 : // Emit the remaining bytes as a literal
449 0 : if (next_emit < ip_end) {
450 0 : op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
451 : }
452 :
453 0 : return op;
454 : }
455 : } // end namespace internal
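
// Usage sketch for the CompressFragment() contract above (hypothetical
// caller; this mirrors what Compress() below does for each block):
//
//   internal::WorkingMemory wmem;
//   int table_size;
//   uint16* table = wmem.GetHashTable(input_size, &table_size);
//   char* end = internal::CompressFragment(input, input_size, output,
//                                          table, table_size);
//   size_t compressed_size = end - output;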
456 :
457 : // Signature of output types needed by decompression code.
458 : // The decompression code is templatized on a type that obeys this
459 : // signature so that we do not pay virtual function call overhead in
460 : // the middle of a tight decompression loop.
461 : //
462 : // class DecompressionWriter {
463 : // public:
464 : // // Called before decompression
465 : // void SetExpectedLength(size_t length);
466 : //
467 : // // Called after decompression
468 : // bool CheckLength() const;
469 : //
470 : // // Called repeatedly during decompression
471 : // bool Append(const char* ip, size_t length);
472 : // bool AppendFromSelf(uint32 offset, size_t length);
473 : //
474 : // // The rules for how TryFastAppend differs from Append are somewhat
475 : // // convoluted:
476 : // //
477 : // // - TryFastAppend is allowed to decline (return false) at any
478 : // // time, for any reason -- just "return false" would be
479 : // // a perfectly legal implementation of TryFastAppend.
480 : // // The intention is for TryFastAppend to allow a fast path
481 : // // in the common case of a small append.
482 : // // - TryFastAppend is allowed to read up to <available> bytes
483 : // // from the input buffer, whereas Append is allowed to read
484 : // // <length>. However, if it returns true, it must leave
485 : // // at least five (kMaximumTagLength) bytes in the input buffer
486 : // // afterwards, so that there is always enough space to read the
487 : // // next tag without checking for a refill.
488 : // - TryFastAppend must always decline (return false)
489 : // // if <length> is 61 or more, as in this case the literal length is not
490 : // // decoded fully. In practice, this should not be a big problem,
491 : // // as it is unlikely that one would implement a fast path accepting
492 : // // this much data.
493 : // //
494 : // bool TryFastAppend(const char* ip, size_t available, size_t length);
495 : // };
496 :
497 :
498 : // Helper class for decompression
499 : class SnappyDecompressor {
500 : private:
501 : Source* reader_; // Underlying source of bytes to decompress
502 : const char* ip_; // Points to next buffered byte
503 : const char* ip_limit_; // Points just past buffered bytes
504 : uint32 peeked_; // Bytes peeked from reader (need to skip)
505 : bool eof_; // Hit end of input without an error?
506 : char scratch_[kMaximumTagLength]; // See RefillTag().
507 :
508 : // Ensure that all of the tag metadata for the next tag is available
509 : // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
510 : // if (ip_limit_ - ip_ < 5).
511 : //
512 : // Returns true on success, false on error or end of input.
513 : bool RefillTag();
514 :
515 : public:
516 0 : explicit SnappyDecompressor(Source* reader)
517 0 : : reader_(reader),
518 : ip_(NULL),
519 : ip_limit_(NULL),
520 : peeked_(0),
521 0 : eof_(false) {
522 0 : }
523 :
524 0 : ~SnappyDecompressor() {
525 : // Advance past any bytes we peeked at from the reader
526 0 : reader_->Skip(peeked_);
527 0 : }
528 :
529 : // Returns true iff we have hit the end of the input without an error.
530 0 : bool eof() const {
531 0 : return eof_;
532 : }
533 :
534 : // Read the uncompressed length stored at the start of the compressed data.
535 : // On success, stores the length in *result and returns true.
536 : // On failure, returns false.
537 0 : bool ReadUncompressedLength(uint32* result) {
538 0 : assert(ip_ == NULL); // Must not have read anything yet
539 : // Length is encoded in 1..5 bytes
540 0 : *result = 0;
541 0 : uint32 shift = 0;
542 : while (true) {
543 0 : if (shift >= 32) return false;
544 : size_t n;
545 0 : const char* ip = reader_->Peek(&n);
546 0 : if (n == 0) return false;
547 0 : const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
548 0 : reader_->Skip(1);
549 0 : uint32 val = c & 0x7f;
550 0 : if (((val << shift) >> shift) != val) return false;
551 0 : *result |= val << shift;
552 0 : if (c < 128) {
553 0 : break;
554 : }
555 0 : shift += 7;
556 0 : }
557 0 : return true;
558 : }
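
// Decoding trace for the varint example given earlier (length 2097150,
// stored as 0xFE 0xFF 0x7F):
//
//   byte 0xFE: val == 0x7E, shift 0  -> *result == 0x00007E
//   byte 0xFF: val == 0x7F, shift 7  -> *result == 0x003FFE
//   byte 0x7F: val == 0x7F, shift 14 -> *result == 0x1FFFFE; c < 128, done.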
559 :
560 : // Process all items found in the input.
561 : // Stops at the end of the input, or earlier if the writer reports an error.
562 : template <class Writer>
563 0 : void DecompressAllTags(Writer* writer) {
564 0 : const char* ip = ip_;
565 :
566 : // We could have put this refill fragment only at the beginning of the loop.
567 : // However, duplicating it at the end of each branch gives the compiler more
568 : // scope to optimize the <ip_limit_ - ip> expression based on the local
569 : // context, which overall increases speed.
570 : #define MAYBE_REFILL() \
571 : if (ip_limit_ - ip < kMaximumTagLength) { \
572 : ip_ = ip; \
573 : if (!RefillTag()) return; \
574 : ip = ip_; \
575 : }
576 :
577 0 : MAYBE_REFILL();
578 0 : for ( ;; ) {
579 0 : const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
580 :
581 0 : if ((c & 0x3) == LITERAL) {
582 0 : size_t literal_length = (c >> 2) + 1u;
583 0 : if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
584 0 : assert(literal_length < 61);
585 0 : ip += literal_length;
586 : // NOTE(user): There is no MAYBE_REFILL() here, as TryFastAppend()
587 : // will not return true unless there's already at least five spare
588 : // bytes in addition to the literal.
589 0 : continue;
590 : }
591 0 : if (PREDICT_FALSE(literal_length >= 61)) {
592 : // Long literal.
593 0 : const size_t literal_length_length = literal_length - 60;
594 0 : literal_length =
595 0 : (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
596 0 : ip += literal_length_length;
597 : }
598 :
599 0 : size_t avail = ip_limit_ - ip;
600 0 : while (avail < literal_length) {
601 0 : if (!writer->Append(ip, avail)) return;
602 0 : literal_length -= avail;
603 0 : reader_->Skip(peeked_);
604 : size_t n;
605 0 : ip = reader_->Peek(&n);
606 0 : avail = n;
607 0 : peeked_ = avail;
608 0 : if (avail == 0) return; // Premature end of input
609 0 : ip_limit_ = ip + avail;
610 : }
611 0 : if (!writer->Append(ip, literal_length)) {
612 0 : return;
613 : }
614 0 : ip += literal_length;
615 0 : MAYBE_REFILL();
616 : } else {
617 0 : const uint32 entry = char_table[c];
618 0 : const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
619 0 : const uint32 length = entry & 0xff;
620 0 : ip += entry >> 11;
621 :
622 : // copy_offset/256 is encoded in bits 8..10. By just fetching
623 : // those bits, we get copy_offset (since the bit-field starts at
624 : // bit 8).
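//
// Worked example, continuing the 0x2d 0x2c copy from EmitCopyLessThan64:
// char_table[0x2d] yields length == 7, a 1-byte trailer, and
// copy_offset == 0x100, so copy_offset + trailer == 256 + 44 == 300.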
625 0 : const uint32 copy_offset = entry & 0x700;
626 0 : if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
627 0 : return;
628 : }
629 0 : MAYBE_REFILL();
630 : }
631 : }
632 :
633 : #undef MAYBE_REFILL
634 : }
635 : };
636 :
637 0 : bool SnappyDecompressor::RefillTag() {
638 0 : const char* ip = ip_;
639 0 : if (ip == ip_limit_) {
640 : // Fetch a new fragment from the reader
641 0 : reader_->Skip(peeked_); // All peeked bytes are used up
642 : size_t n;
643 0 : ip = reader_->Peek(&n);
644 0 : peeked_ = n;
645 0 : if (n == 0) {
646 0 : eof_ = true;
647 0 : return false;
648 : }
649 0 : ip_limit_ = ip + n;
650 : }
651 :
652 : // Read the tag character
653 0 : assert(ip < ip_limit_);
654 0 : const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
655 0 : const uint32 entry = char_table[c];
656 0 : const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
657 0 : assert(needed <= sizeof(scratch_));
658 :
659 : // Read more bytes from reader if needed
660 0 : uint32 nbuf = ip_limit_ - ip;
661 0 : if (nbuf < needed) {
662 : // Stitch together bytes from ip and reader to form the word
663 : // contents. We store the needed bytes in "scratch_". They
664 : // will be consumed immediately by the caller since we do not
665 : // read more than we need.
666 0 : memmove(scratch_, ip, nbuf);
667 0 : reader_->Skip(peeked_); // All peeked bytes are used up
668 0 : peeked_ = 0;
669 0 : while (nbuf < needed) {
670 : size_t length;
671 0 : const char* src = reader_->Peek(&length);
672 0 : if (length == 0) return false;
673 0 : uint32 to_add = min<uint32>(needed - nbuf, length);
674 0 : memcpy(scratch_ + nbuf, src, to_add);
675 0 : nbuf += to_add;
676 0 : reader_->Skip(to_add);
677 : }
678 0 : assert(nbuf == needed);
679 0 : ip_ = scratch_;
680 0 : ip_limit_ = scratch_ + needed;
681 0 : } else if (nbuf < kMaximumTagLength) {
682 : // Have enough bytes, but move into scratch_ so that we do not
683 : // read past end of input
684 0 : memmove(scratch_, ip, nbuf);
685 0 : reader_->Skip(peeked_); // All peeked bytes are used up
686 0 : peeked_ = 0;
687 0 : ip_ = scratch_;
688 0 : ip_limit_ = scratch_ + nbuf;
689 : } else {
690 : // Pass pointer to buffer returned by reader_.
691 0 : ip_ = ip;
692 : }
693 0 : return true;
694 : }
695 :
696 : template <typename Writer>
697 0 : static bool InternalUncompress(Source* r, Writer* writer) {
698 : // Read the uncompressed length from the front of the compressed input
699 0 : SnappyDecompressor decompressor(r);
700 0 : uint32 uncompressed_len = 0;
701 0 : if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
702 0 : return InternalUncompressAllTags(&decompressor, writer, uncompressed_len);
703 : }
704 :
705 : template <typename Writer>
706 0 : static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
707 : Writer* writer,
708 : uint32 uncompressed_len) {
709 0 : writer->SetExpectedLength(uncompressed_len);
710 :
711 : // Process the entire input
712 0 : decompressor->DecompressAllTags(writer);
713 0 : writer->Flush();
714 0 : return (decompressor->eof() && writer->CheckLength());
715 : }
716 :
717 0 : bool GetUncompressedLength(Source* source, uint32* result) {
718 0 : SnappyDecompressor decompressor(source);
719 0 : return decompressor.ReadUncompressedLength(result);
720 : }
721 :
722 0 : size_t Compress(Source* reader, Sink* writer) {
723 0 : size_t written = 0;
724 0 : size_t N = reader->Available();
725 : char ulength[Varint::kMax32];
726 0 : char* p = Varint::Encode32(ulength, N);
727 0 : writer->Append(ulength, p-ulength);
728 0 : written += (p - ulength);
729 :
730 0 : internal::WorkingMemory wmem;
731 0 : char* scratch = NULL;
732 0 : char* scratch_output = NULL;
733 :
734 0 : while (N > 0) {
735 : // Get next block to compress (without copying if possible)
736 : size_t fragment_size;
737 0 : const char* fragment = reader->Peek(&fragment_size);
738 0 : assert(fragment_size != 0); // premature end of input
739 0 : const size_t num_to_read = min(N, kBlockSize);
740 0 : size_t bytes_read = fragment_size;
741 :
742 0 : size_t pending_advance = 0;
743 0 : if (bytes_read >= num_to_read) {
744 : // Buffer returned by reader is large enough
745 0 : pending_advance = num_to_read;
746 0 : fragment_size = num_to_read;
747 : } else {
748 : // Read into scratch buffer
749 0 : if (scratch == NULL) {
750 : // If this is the last iteration, we want to allocate N bytes
751 : // of space, otherwise the max possible kBlockSize space.
752 : // num_to_read contains exactly the correct value
753 0 : scratch = new char[num_to_read];
754 : }
755 0 : memcpy(scratch, fragment, bytes_read);
756 0 : reader->Skip(bytes_read);
757 :
758 0 : while (bytes_read < num_to_read) {
759 0 : fragment = reader->Peek(&fragment_size);
760 0 : size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
761 0 : memcpy(scratch + bytes_read, fragment, n);
762 0 : bytes_read += n;
763 0 : reader->Skip(n);
764 : }
765 0 : assert(bytes_read == num_to_read);
766 0 : fragment = scratch;
767 0 : fragment_size = num_to_read;
768 : }
769 0 : assert(fragment_size == num_to_read);
770 :
771 : // Get encoding table for compression
772 : int table_size;
773 0 : uint16* table = wmem.GetHashTable(num_to_read, &table_size);
774 :
775 : // Compress input_fragment and append to dest
776 0 : const int max_output = MaxCompressedLength(num_to_read);
777 :
778 : // Need a scratch buffer for the output, in case the byte sink doesn't
779 : // have room for us directly.
780 0 : if (scratch_output == NULL) {
781 0 : scratch_output = new char[max_output];
782 : } else {
783 : // Since we encode kBlockSize regions followed by a region
784 : // which is <= kBlockSize in length, a previously allocated
785 : // scratch_output[] region is big enough for this iteration.
786 : }
787 0 : char* dest = writer->GetAppendBuffer(max_output, scratch_output);
788 0 : char* end = internal::CompressFragment(fragment, fragment_size,
789 0 : dest, table, table_size);
790 0 : writer->Append(dest, end - dest);
791 0 : written += (end - dest);
792 :
793 0 : N -= num_to_read;
794 0 : reader->Skip(pending_advance);
795 : }
796 :
797 0 : delete[] scratch;
798 0 : delete[] scratch_output;
799 :
800 0 : return written;
801 : }
802 :
803 : // -----------------------------------------------------------------------
804 : // IOVec interfaces
805 : // -----------------------------------------------------------------------
806 :
807 : // A type that writes to an iovec.
808 : // Note that this is not a "ByteSink", but a type that matches the
809 : // Writer template argument to SnappyDecompressor::DecompressAllTags().
810 : class SnappyIOVecWriter {
811 : private:
812 : const struct iovec* output_iov_;
813 : const size_t output_iov_count_;
814 :
815 : // We are currently writing into output_iov_[curr_iov_index_].
816 : size_t curr_iov_index_;
817 :
818 : // Bytes written to output_iov_[curr_iov_index_] so far.
819 : size_t curr_iov_written_;
820 :
821 : // Total bytes decompressed into output_iov_ so far.
822 : size_t total_written_;
823 :
824 : // Maximum number of bytes that will be decompressed into output_iov_.
825 : size_t output_limit_;
826 :
827 0 : inline char* GetIOVecPointer(size_t index, size_t offset) {
828 0 : return reinterpret_cast<char*>(output_iov_[index].iov_base) +
829 0 : offset;
830 : }
831 :
832 : public:
833 : // Does not take ownership of iov. iov must be valid during the
834 : // entire lifetime of the SnappyIOVecWriter.
835 0 : inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
836 0 : : output_iov_(iov),
837 : output_iov_count_(iov_count),
838 : curr_iov_index_(0),
839 : curr_iov_written_(0),
840 : total_written_(0),
841 0 : output_limit_(-1) {
842 0 : }
843 :
844 0 : inline void SetExpectedLength(size_t len) {
845 0 : output_limit_ = len;
846 0 : }
847 :
848 0 : inline bool CheckLength() const {
849 0 : return total_written_ == output_limit_;
850 : }
851 :
852 0 : inline bool Append(const char* ip, size_t len) {
853 0 : if (total_written_ + len > output_limit_) {
854 0 : return false;
855 : }
856 :
857 0 : while (len > 0) {
858 0 : assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
859 0 : if (curr_iov_written_ >= output_iov_[curr_iov_index_].iov_len) {
860 : // This iovec is full. Go to the next one.
861 0 : if (curr_iov_index_ + 1 >= output_iov_count_) {
862 0 : return false;
863 : }
864 0 : curr_iov_written_ = 0;
865 0 : ++curr_iov_index_;
866 : }
867 :
868 : const size_t to_write = std::min(
869 0 : len, output_iov_[curr_iov_index_].iov_len - curr_iov_written_);
870 0 : memcpy(GetIOVecPointer(curr_iov_index_, curr_iov_written_),
871 : ip,
872 0 : to_write);
873 0 : curr_iov_written_ += to_write;
874 0 : total_written_ += to_write;
875 0 : ip += to_write;
876 0 : len -= to_write;
877 : }
878 :
879 0 : return true;
880 : }
881 :
882 0 : inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
883 0 : const size_t space_left = output_limit_ - total_written_;
884 0 : if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
885 0 : output_iov_[curr_iov_index_].iov_len - curr_iov_written_ >= 16) {
886 : // Fast path, used for the majority (about 95%) of invocations.
887 0 : char* ptr = GetIOVecPointer(curr_iov_index_, curr_iov_written_);
888 0 : UnalignedCopy64(ip, ptr);
889 0 : UnalignedCopy64(ip + 8, ptr + 8);
890 0 : curr_iov_written_ += len;
891 0 : total_written_ += len;
892 0 : return true;
893 : }
894 :
895 0 : return false;
896 : }
897 :
898 0 : inline bool AppendFromSelf(size_t offset, size_t len) {
899 0 : if (offset > total_written_ || offset == 0) {
900 0 : return false;
901 : }
902 0 : const size_t space_left = output_limit_ - total_written_;
903 0 : if (len > space_left) {
904 0 : return false;
905 : }
906 :
907 : // Locate the iovec from which we need to start the copy.
908 0 : size_t from_iov_index = curr_iov_index_;
909 0 : size_t from_iov_offset = curr_iov_written_;
910 0 : while (offset > 0) {
911 0 : if (from_iov_offset >= offset) {
912 0 : from_iov_offset -= offset;
913 0 : break;
914 : }
915 :
916 0 : offset -= from_iov_offset;
917 0 : assert(from_iov_index > 0);
918 0 : --from_iov_index;
919 0 : from_iov_offset = output_iov_[from_iov_index].iov_len;
920 : }
921 :
922 : // Copy <len> bytes starting from the iovec pointed to by from_iov_index to
923 : // the current iovec.
924 0 : while (len > 0) {
925 0 : assert(from_iov_index <= curr_iov_index_);
926 0 : if (from_iov_index != curr_iov_index_) {
927 : const size_t to_copy = std::min(
928 0 : output_iov_[from_iov_index].iov_len - from_iov_offset,
929 0 : len);
930 0 : Append(GetIOVecPointer(from_iov_index, from_iov_offset), to_copy);
931 0 : len -= to_copy;
932 0 : if (len > 0) {
933 0 : ++from_iov_index;
934 0 : from_iov_offset = 0;
935 : }
936 : } else {
937 0 : assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
938 0 : size_t to_copy = std::min(output_iov_[curr_iov_index_].iov_len -
939 0 : curr_iov_written_,
940 0 : len);
941 0 : if (to_copy == 0) {
942 : // This iovec is full. Go to the next one.
943 0 : if (curr_iov_index_ + 1 >= output_iov_count_) {
944 0 : return false;
945 : }
946 0 : ++curr_iov_index_;
947 0 : curr_iov_written_ = 0;
948 0 : continue;
949 : }
950 0 : if (to_copy > len) {
951 0 : to_copy = len;
952 : }
953 0 : IncrementalCopy(GetIOVecPointer(from_iov_index, from_iov_offset),
954 : GetIOVecPointer(curr_iov_index_, curr_iov_written_),
955 0 : to_copy);
956 0 : curr_iov_written_ += to_copy;
957 0 : from_iov_offset += to_copy;
958 0 : total_written_ += to_copy;
959 0 : len -= to_copy;
960 : }
961 : }
962 :
963 0 : return true;
964 : }
965 :
966 0 : inline void Flush() {}
967 : };
968 :
969 0 : bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
970 : const struct iovec* iov, size_t iov_cnt) {
971 0 : ByteArraySource reader(compressed, compressed_length);
972 0 : return RawUncompressToIOVec(&reader, iov, iov_cnt);
973 : }
974 :
975 0 : bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
976 : size_t iov_cnt) {
977 0 : SnappyIOVecWriter output(iov, iov_cnt);
978 0 : return InternalUncompress(compressed, &output);
979 : }
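
// Usage sketch (hypothetical buffers): decompressing into two iovecs that
// together must hold the full uncompressed output:
//
//   char part1[40], part2[60];
//   struct iovec iov[2] = { { part1, sizeof(part1) },
//                           { part2, sizeof(part2) } };
//   bool ok = snappy::RawUncompressToIOVec(compressed, compressed_length,
//                                          iov, 2);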
980 :
981 : // -----------------------------------------------------------------------
982 : // Flat array interfaces
983 : // -----------------------------------------------------------------------
984 :
985 : // A type that writes to a flat array.
986 : // Note that this is not a "ByteSink", but a type that matches the
987 : // Writer template argument to SnappyDecompressor::DecompressAllTags().
988 : class SnappyArrayWriter {
989 : private:
990 : char* base_;
991 : char* op_;
992 : char* op_limit_;
993 :
994 : public:
995 0 : inline explicit SnappyArrayWriter(char* dst)
996 0 : : base_(dst),
997 : op_(dst),
998 0 : op_limit_(dst) {
999 0 : }
1000 :
1001 0 : inline void SetExpectedLength(size_t len) {
1002 0 : op_limit_ = op_ + len;
1003 0 : }
1004 :
1005 0 : inline bool CheckLength() const {
1006 0 : return op_ == op_limit_;
1007 : }
1008 :
1009 0 : inline bool Append(const char* ip, size_t len) {
1010 0 : char* op = op_;
1011 0 : const size_t space_left = op_limit_ - op;
1012 0 : if (space_left < len) {
1013 0 : return false;
1014 : }
1015 0 : memcpy(op, ip, len);
1016 0 : op_ = op + len;
1017 0 : return true;
1018 : }
1019 :
1020 0 : inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
1021 0 : char* op = op_;
1022 0 : const size_t space_left = op_limit_ - op;
1023 0 : if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
1024 : // Fast path, used for the majority (about 95%) of invocations.
1025 0 : UnalignedCopy64(ip, op);
1026 0 : UnalignedCopy64(ip + 8, op + 8);
1027 0 : op_ = op + len;
1028 0 : return true;
1029 : } else {
1030 0 : return false;
1031 : }
1032 : }
1033 :
1034 0 : inline bool AppendFromSelf(size_t offset, size_t len) {
1035 0 : char* op = op_;
1036 0 : const size_t space_left = op_limit_ - op;
1037 :
1038 : // Check if we try to append from before the start of the buffer.
1039 : // Normally this would just be a check for "produced < offset",
1040 : // but "produced <= offset - 1u" is equivalent for every case
1041 : // except the one where offset==0, where the right side will wrap around
1042 : // to a very big number. This is convenient, as offset==0 is another
1043 : // invalid case that we also want to catch, so that we do not go
1044 : // into an infinite loop.
1045 0 : assert(op >= base_);
1046 0 : size_t produced = op - base_;
1047 0 : if (produced <= offset - 1u) {
1048 0 : return false;
1049 : }
1050 0 : if (len <= 16 && offset >= 8 && space_left >= 16) {
1051 : // Fast path, used for the majority (70-80%) of dynamic invocations.
1052 0 : UnalignedCopy64(op - offset, op);
1053 0 : UnalignedCopy64(op - offset + 8, op + 8);
1054 : } else {
1055 0 : if (space_left >= len + kMaxIncrementCopyOverflow) {
1056 0 : IncrementalCopyFastPath(op - offset, op, len);
1057 : } else {
1058 0 : if (space_left < len) {
1059 0 : return false;
1060 : }
1061 0 : IncrementalCopy(op - offset, op, len);
1062 : }
1063 : }
1064 :
1065 0 : op_ = op + len;
1066 0 : return true;
1067 : }
1068 0 : inline size_t Produced() const {
1069 0 : return op_ - base_;
1070 : }
1071 0 : inline void Flush() {}
1072 : };
1073 :
1074 0 : bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
1075 0 : ByteArraySource reader(compressed, n);
1076 0 : return RawUncompress(&reader, uncompressed);
1077 : }
1078 :
1079 0 : bool RawUncompress(Source* compressed, char* uncompressed) {
1080 0 : SnappyArrayWriter output(uncompressed);
1081 0 : return InternalUncompress(compressed, &output);
1082 : }
1083 :
1084 0 : bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
1085 : size_t ulength;
1086 0 : if (!GetUncompressedLength(compressed, n, &ulength)) {
1087 0 : return false;
1088 : }
1089 : // On 32-bit builds: max_size() < kuint32max. Check for that instead
1090 : // of crashing (e.g., consider externally specified compressed data).
1091 0 : if (ulength > uncompressed->max_size()) {
1092 0 : return false;
1093 : }
1094 0 : STLStringResizeUninitialized(uncompressed, ulength);
1095 0 : return RawUncompress(compressed, n, string_as_array(uncompressed));
1096 : }
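
// Round-trip usage sketch for the flat-array interface (hypothetical
// variables):
//
//   string original = ..., compressed, restored;
//   snappy::Compress(original.data(), original.size(), &compressed);
//   bool ok = snappy::Uncompress(compressed.data(), compressed.size(),
//                                &restored);
//   // ok && restored == original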
1097 :
1098 : // A Writer that drops everything on the floor and just does validation
1099 : class SnappyDecompressionValidator {
1100 : private:
1101 : size_t expected_;
1102 : size_t produced_;
1103 :
1104 : public:
1105 0 : inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
1106 0 : inline void SetExpectedLength(size_t len) {
1107 0 : expected_ = len;
1108 0 : }
1109 0 : inline bool CheckLength() const {
1110 0 : return expected_ == produced_;
1111 : }
1112 0 : inline bool Append(const char* ip, size_t len) {
1113 0 : produced_ += len;
1114 0 : return produced_ <= expected_;
1115 : }
1116 0 : inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
1117 0 : return false;
1118 : }
1119 0 : inline bool AppendFromSelf(size_t offset, size_t len) {
1120 : // See SnappyArrayWriter::AppendFromSelf for an explanation of
1121 : // the "offset - 1u" trick.
1122 0 : if (produced_ <= offset - 1u) return false;
1123 0 : produced_ += len;
1124 0 : return produced_ <= expected_;
1125 : }
1126 0 : inline void Flush() {}
1127 : };
1128 :
1129 0 : bool IsValidCompressedBuffer(const char* compressed, size_t n) {
1130 0 : ByteArraySource reader(compressed, n);
1131 0 : SnappyDecompressionValidator writer;
1132 0 : return InternalUncompress(&reader, &writer);
1133 : }
1134 :
1135 0 : bool IsValidCompressed(Source* compressed) {
1136 0 : SnappyDecompressionValidator writer;
1137 0 : return InternalUncompress(compressed, &writer);
1138 : }
1139 :
1140 0 : void RawCompress(const char* input,
1141 : size_t input_length,
1142 : char* compressed,
1143 : size_t* compressed_length) {
1144 0 : ByteArraySource reader(input, input_length);
1145 0 : UncheckedByteArraySink writer(compressed);
1146 0 : Compress(&reader, &writer);
1147 :
1148 : // Compute how many bytes were added
1149 0 : *compressed_length = (writer.CurrentDestination() - compressed);
1150 0 : }
1151 :
1152 0 : size_t Compress(const char* input, size_t input_length, string* compressed) {
1153 : // Pre-grow the buffer to the max length of the compressed output
1154 0 : compressed->resize(MaxCompressedLength(input_length));
1155 :
1156 : size_t compressed_length;
1157 0 : RawCompress(input, input_length, string_as_array(compressed),
1158 0 : &compressed_length);
1159 0 : compressed->resize(compressed_length);
1160 0 : return compressed_length;
1161 : }
1162 :
1163 : // -----------------------------------------------------------------------
1164 : // Sink interface
1165 : // -----------------------------------------------------------------------
1166 :
1167 : // A type that decompresses into a Sink. The template parameter
1168 : // Allocator must export one method "char* Allocate(int size);", which
1169 : // allocates a buffer of "size" bytes that is later handed to the destination.
1170 : template <typename Allocator>
1171 0 : class SnappyScatteredWriter {
1172 : Allocator allocator_;
1173 :
1174 : // We need random access into the data generated so far. Therefore
1175 : // we keep track of all of the generated data as an array of blocks.
1176 : // All of the blocks except the last have length kBlockSize.
1177 : vector<char*> blocks_;
1178 : size_t expected_;
1179 :
1180 : // Total size of all fully generated blocks so far
1181 : size_t full_size_;
1182 :
1183 : // Pointer into current output block
1184 : char* op_base_; // Base of output block
1185 : char* op_ptr_; // Pointer to next unfilled byte in block
1186 : char* op_limit_; // Pointer just past block
1187 :
1188 0 : inline size_t Size() const {
1189 0 : return full_size_ + (op_ptr_ - op_base_);
1190 : }
1191 :
1192 : bool SlowAppend(const char* ip, size_t len);
1193 : bool SlowAppendFromSelf(size_t offset, size_t len);
1194 :
1195 : public:
1196 0 : inline explicit SnappyScatteredWriter(const Allocator& allocator)
1197 : : allocator_(allocator),
1198 : full_size_(0),
1199 : op_base_(NULL),
1200 : op_ptr_(NULL),
1201 0 : op_limit_(NULL) {
1202 0 : }
1203 :
1204 0 : inline void SetExpectedLength(size_t len) {
1205 0 : assert(blocks_.empty());
1206 0 : expected_ = len;
1207 0 : }
1208 :
1209 0 : inline bool CheckLength() const {
1210 0 : return Size() == expected_;
1211 : }
1212 :
1213 : // Return the number of bytes actually uncompressed so far
1214 0 : inline size_t Produced() const {
1215 0 : return Size();
1216 : }
1217 :
1218 0 : inline bool Append(const char* ip, size_t len) {
1219 0 : size_t avail = op_limit_ - op_ptr_;
1220 0 : if (len <= avail) {
1221 : // Fast path
1222 0 : memcpy(op_ptr_, ip, len);
1223 0 : op_ptr_ += len;
1224 0 : return true;
1225 : } else {
1226 0 : return SlowAppend(ip, len);
1227 : }
1228 : }
1229 :
1230 0 : inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
1231 0 : char* op = op_ptr_;
1232 0 : const int space_left = op_limit_ - op;
1233 0 : if (length <= 16 && available >= 16 + kMaximumTagLength &&
1234 : space_left >= 16) {
1235 : // Fast path, used for the majority (about 95%) of invocations.
1236 0 : UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
1237 0 : UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
1238 0 : op_ptr_ = op + length;
1239 0 : return true;
1240 : } else {
1241 0 : return false;
1242 : }
1243 : }
1244 :
1245 0 : inline bool AppendFromSelf(size_t offset, size_t len) {
1246 : // See SnappyArrayWriter::AppendFromSelf for an explanation of
1247 : // the "offset - 1u" trick.
1248 0 : if (offset - 1u < op_ptr_ - op_base_) {
1249 0 : const size_t space_left = op_limit_ - op_ptr_;
1250 0 : if (space_left >= len + kMaxIncrementCopyOverflow) {
1251 : // Fast path: src and dst in current block.
1252 0 : IncrementalCopyFastPath(op_ptr_ - offset, op_ptr_, len);
1253 0 : op_ptr_ += len;
1254 0 : return true;
1255 : }
1256 : }
1257 0 : return SlowAppendFromSelf(offset, len);
1258 : }
1259 :
1260 : // Called at the end of the decompression. We ask the allocator to
1261 : // write all blocks to the sink.
1262 0 : inline void Flush() { allocator_.Flush(Produced()); }
1263 : };
1264 :
1265 : template<typename Allocator>
1266 0 : bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
1267 0 : size_t avail = op_limit_ - op_ptr_;
1268 0 : while (len > avail) {
1269 : // Completely fill this block
1270 0 : memcpy(op_ptr_, ip, avail);
1271 0 : op_ptr_ += avail;
1272 0 : assert(op_limit_ - op_ptr_ == 0);
1273 0 : full_size_ += (op_ptr_ - op_base_);
1274 0 : len -= avail;
1275 0 : ip += avail;
1276 :
1277 : // Bounds check
1278 0 : if (full_size_ + len > expected_) {
1279 0 : return false;
1280 : }
1281 :
1282 : // Make new block
1283 0 : size_t bsize = min<size_t>(kBlockSize, expected_ - full_size_);
1284 0 : op_base_ = allocator_.Allocate(bsize);
1285 0 : op_ptr_ = op_base_;
1286 0 : op_limit_ = op_base_ + bsize;
1287 0 : blocks_.push_back(op_base_);
1288 0 : avail = bsize;
1289 : }
1290 :
1291 0 : memcpy(op_ptr_, ip, len);
1292 0 : op_ptr_ += len;
1293 0 : return true;
1294 : }
1295 :
1296 : template<typename Allocator>
1297 0 : bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
1298 : size_t len) {
1299 : // Overflow check
1300 : // See SnappyArrayWriter::AppendFromSelf for an explanation of
1301 : // the "offset - 1u" trick.
1302 0 : const size_t cur = Size();
1303 0 : if (offset - 1u >= cur) return false;
1304 0 : if (expected_ - cur < len) return false;
1305 :
1306 : // Currently we shouldn't ever hit this path because Compress() chops the
1307 : // input into blocks and does not create cross-block copies. However, it is
1308 : // nice if we do not rely on that, since we can get better compression if we
1309 : // allow cross-block copies and thus might want to change the compressor in
1310 : // the future.
1311 0 : size_t src = cur - offset;
1312 0 : while (len-- > 0) {
1313 0 : char c = blocks_[src >> kBlockLog][src & (kBlockSize-1)];
1314 0 : Append(&c, 1);
1315 0 : src++;
1316 : }
1317 0 : return true;
1318 : }
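
// Index math sketch for SlowAppendFromSelf() above: with kBlockLog == 16
// (64 KB blocks, per snappy.h), global output position src lives in
// blocks_[src >> 16] at byte offset src & 0xFFFF, so the shift and mask
// are just that division and modulus.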
1319 :
1320 0 : class SnappySinkAllocator {
1321 : public:
1322 0 : explicit SnappySinkAllocator(Sink* dest): dest_(dest) {}
1323 0 : ~SnappySinkAllocator() {}
1324 :
1325 0 : char* Allocate(int size) {
1326 0 : Datablock block(new char[size], size);
1327 0 : blocks_.push_back(block);
1328 0 : return block.data;
1329 : }
1330 :
1331 : // We flush only at the end, because the writer wants
1332 : // random access to the blocks and once we hand the
1333 : // block over to the sink, we can't access it anymore.
1334 : // Also we don't write more than has been actually written
1335 : // to the blocks.
1336 0 : void Flush(size_t size) {
1337 0 : size_t size_written = 0;
1338 : size_t block_size;
1339 0 : for (int i = 0; i < blocks_.size(); ++i) {
1340 0 : block_size = min<size_t>(blocks_[i].size, size - size_written);
1341 0 : dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
1342 0 : &SnappySinkAllocator::Deleter, NULL);
1343 0 : size_written += block_size;
1344 : }
1345 0 : blocks_.clear();
1346 0 : }
1347 :
1348 : private:
1349 : struct Datablock {
1350 : char* data;
1351 : size_t size;
1352 0 : Datablock(char* p, size_t s) : data(p), size(s) {}
1353 : };
1354 :
1355 0 : static void Deleter(void* arg, const char* bytes, size_t size) {
1356 0 : delete[] bytes;
1357 0 : }
1358 :
1359 : Sink* dest_;
1360 : vector<Datablock> blocks_;
1361 :
1362 : // Note: copying this object is allowed
1363 : };
1364 :
1365 0 : size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
1366 0 : SnappySinkAllocator allocator(uncompressed);
1367 0 : SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
1368 0 : InternalUncompress(compressed, &writer);
1369 0 : return writer.Produced();
1370 : }
1371 :
1372 0 : bool Uncompress(Source* compressed, Sink* uncompressed) {
1373 : // Read the uncompressed length from the front of the compressed input
1374 0 : SnappyDecompressor decompressor(compressed);
1375 0 : uint32 uncompressed_len = 0;
1376 0 : if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
1377 0 : return false;
1378 : }
1379 :
1380 : char c;
1381 : size_t allocated_size;
1382 0 : char* buf = uncompressed->GetAppendBufferVariable(
1383 0 : 1, uncompressed_len, &c, 1, &allocated_size);
1384 :
1385 : // If we can get a flat buffer, then use it; otherwise do block-by-block
1386 : // uncompression.
1387 0 : if (allocated_size >= uncompressed_len) {
1388 0 : SnappyArrayWriter writer(buf);
1389 0 : bool result = InternalUncompressAllTags(
1390 0 : &decompressor, &writer, uncompressed_len);
1391 0 : uncompressed->Append(buf, writer.Produced());
1392 0 : return result;
1393 : } else {
1394 0 : SnappySinkAllocator allocator(uncompressed);
1395 0 : SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
1396 0 : return InternalUncompressAllTags(&decompressor, &writer, uncompressed_len);
1397 : }
1398 : }
1399 :
1400 : } // end namespace snappy
|