flate2/mem.rs
use std::error::Error;
use std::fmt;
use std::io;

use crate::ffi::{self, Backend, Deflate, DeflateBackend, ErrorMessage, Inflate, InflateBackend};
use crate::Compression;

/// Raw in-memory compression stream for blocks of data.
///
/// This type is the building block for the I/O streams in the rest of this
/// crate. It requires more management than the [`Read`]/[`Write`] API but is
/// maximally flexible in terms of accepting input from any source and being
/// able to produce output to any memory location.
///
/// It is recommended to use the I/O stream adaptors over this type as they're
/// easier to use.
///
/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
#[derive(Debug)]
pub struct Compress {
    inner: Deflate,
}

/// Raw in-memory decompression stream for blocks of data.
///
/// This type is the building block for the I/O streams in the rest of this
/// crate. It requires more management than the [`Read`]/[`Write`] API but is
/// maximally flexible in terms of accepting input from any source and being
/// able to produce output to any memory location.
///
/// It is recommended to use the I/O stream adaptors over this type as they're
/// easier to use.
///
/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
#[derive(Debug)]
pub struct Decompress {
    inner: Inflate,
}

/// Values which indicate the form of flushing to be used when compressing
/// in-memory data.
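///
/// # Examples
///
/// A minimal sketch (not taken from the crate's own docs) of the usual
/// pattern: `None` while more input is still expected, `Finish` once the
/// input is complete. The reserved capacity is an illustrative assumption.
///
/// ```
/// use flate2::{Compress, Compression, FlushCompress};
///
/// let mut compressor = Compress::new(Compression::default(), true);
/// let mut output = Vec::with_capacity(256);
/// compressor
///     .compress_vec(b"hello, ", &mut output, FlushCompress::None)
///     .unwrap();
/// compressor
///     .compress_vec(b"world!", &mut output, FlushCompress::Finish)
///     .unwrap();
/// assert_eq!(compressor.total_in(), 13);
/// assert!(!output.is_empty());
/// ```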
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[non_exhaustive]
#[allow(clippy::unnecessary_cast)]
pub enum FlushCompress {
    /// A typical parameter for passing to compression/decompression functions,
    /// this lets the underlying stream decide how much data to accumulate
    /// before producing output, in order to maximize compression.
    None = ffi::MZ_NO_FLUSH as isize,

    /// All pending output is flushed to the output buffer, but the output is
    /// not aligned to a byte boundary.
    ///
    /// All input data so far will be available to the decompressor (as with
    /// `FlushCompress::Sync`). This completes the current deflate block and
    /// follows it with an empty fixed codes block that is 10 bits long, and it
    /// assures that enough bytes are output for the decompressor to finish the
    /// block before the empty fixed codes block.
    Partial = ffi::MZ_PARTIAL_FLUSH as isize,

    /// All pending output is flushed to the output buffer and the output is
    /// aligned on a byte boundary so that the decompressor can get all input
    /// data available so far.
    ///
    /// Flushing may degrade compression for some compression algorithms and so
    /// it should only be used when necessary. This will complete the current
    /// deflate block and follow it with an empty stored block.
    Sync = ffi::MZ_SYNC_FLUSH as isize,

    /// All output is flushed as with `FlushCompress::Sync` and the compression
    /// state is reset so decompression can restart from this point if previous
    /// compressed data has been damaged or if random access is desired.
    ///
    /// Using this option too often can seriously degrade compression.
    Full = ffi::MZ_FULL_FLUSH as isize,

    /// Pending input is processed and pending output is flushed.
    ///
    /// The return value may indicate that the stream is not yet done and more
    /// data has yet to be processed.
    Finish = ffi::MZ_FINISH as isize,
}

/// Values which indicate the form of flushing to be used when
/// decompressing in-memory data.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[non_exhaustive]
#[allow(clippy::unnecessary_cast)]
pub enum FlushDecompress {
    /// A typical parameter for passing to compression/decompression functions,
    /// this lets the underlying stream decide how much data to accumulate
    /// before producing output, in order to maximize compression.
    None = ffi::MZ_NO_FLUSH as isize,

    /// All pending output is flushed to the output buffer.
    ///
    /// For decompression this requests that as much output as possible be
    /// flushed to the output buffer; more input may still be required before
    /// further progress can be made.
    Sync = ffi::MZ_SYNC_FLUSH as isize,

    /// Pending input is processed and pending output is flushed.
    ///
    /// The return value may indicate that the stream is not yet done and more
    /// data has yet to be processed.
    Finish = ffi::MZ_FINISH as isize,
}

/// The inner state for an error when decompressing
#[derive(Clone, Debug)]
pub(crate) enum DecompressErrorInner {
    General { msg: ErrorMessage },
    NeedsDictionary(u32),
}

/// Error returned when a decompression object finds that the input stream of
/// bytes was not a valid zlib/deflate stream.
#[derive(Clone, Debug)]
pub struct DecompressError(pub(crate) DecompressErrorInner);

impl DecompressError {
    /// Indicates whether decompression failed due to requiring a dictionary.
    ///
    /// The resulting integer is the Adler-32 checksum of the dictionary
    /// required.
    pub fn needs_dictionary(&self) -> Option<u32> {
        match self.0 {
            DecompressErrorInner::NeedsDictionary(adler) => Some(adler),
            _ => None,
        }
    }
}

#[inline]
pub(crate) fn decompress_failed<T>(msg: ErrorMessage) -> Result<T, DecompressError> {
    Err(DecompressError(DecompressErrorInner::General { msg }))
}

#[inline]
pub(crate) fn decompress_need_dict<T>(adler: u32) -> Result<T, DecompressError> {
    Err(DecompressError(DecompressErrorInner::NeedsDictionary(
        adler,
    )))
}

/// Error returned when a compression object is used incorrectly or otherwise
/// generates an error.
#[derive(Clone, Debug)]
pub struct CompressError {
    pub(crate) msg: ErrorMessage,
}

#[inline]
pub(crate) fn compress_failed<T>(msg: ErrorMessage) -> Result<T, CompressError> {
    Err(CompressError { msg })
}

/// Possible status results of compressing some data or successfully
/// decompressing a block of data.
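///
/// # Examples
///
/// A small illustrative sketch (not from the crate's docs) of reacting to the
/// status returned by an in-memory (de)compression call:
///
/// ```
/// use flate2::Status;
///
/// fn describe(status: Status) -> &'static str {
///     match status {
///         Status::Ok => "progress was made; call again with more input or output space",
///         Status::BufError => "stuck; provide more input or a larger output buffer",
///         Status::StreamEnd => "the stream is finished",
///     }
/// }
///
/// assert_eq!(describe(Status::StreamEnd), "the stream is finished");
/// ```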
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Status {
    /// Indicates success.
    ///
    /// Means that more input may be needed but isn't available
    /// and/or there's more output to be written but the output buffer is full.
    Ok,

    /// Indicates that forward progress is not possible due to input or output
    /// buffers being empty.
    ///
    /// For compression it means the input buffer needs some more data or the
    /// output buffer needs to be freed up before trying again.
    ///
    /// For decompression this means that more input is needed to continue or
    /// the output buffer isn't large enough to contain the result. The function
    /// can be called again after fixing both.
    BufError,

    /// Indicates that all input has been consumed and all output bytes have
    /// been written. Decompression/compression should not be called again.
    ///
    /// For decompression with zlib streams the Adler-32 of the decompressed
    /// data has also been verified.
    StreamEnd,
}

impl Compress {
    /// Creates a new object ready for compressing data that it's given.
    ///
    /// The `level` argument here indicates what level of compression is going
    /// to be performed, and the `zlib_header` argument indicates whether the
    /// output data should have a zlib header or not.
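    ///
    /// # Examples
    ///
    /// A minimal sketch (not taken from the crate's docs) of constructing a
    /// compressor that wraps its output in a zlib header and trailer:
    ///
    /// ```
    /// use flate2::{Compress, Compression};
    ///
    /// let compressor = Compress::new(Compression::best(), true);
    /// assert_eq!(compressor.total_in(), 0);
    /// assert_eq!(compressor.total_out(), 0);
    /// ```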
    pub fn new(level: Compression, zlib_header: bool) -> Compress {
        Compress {
            inner: Deflate::make(level, zlib_header, ffi::MZ_DEFAULT_WINDOW_BITS as u8),
        }
    }

    /// Creates a new object ready for compressing data that it's given.
    ///
    /// The `level` argument here indicates what level of compression is going
    /// to be performed, and the `zlib_header` argument indicates whether the
    /// output data should have a zlib header or not. The `window_bits` parameter
    /// indicates the base-2 logarithm of the sliding window size and must be
    /// between 9 and 15.
    ///
    /// # Panics
    ///
    /// If `window_bits` does not fall into the range 9 ..= 15,
    /// `new_with_window_bits` will panic.
    #[cfg(feature = "any_zlib")]
    pub fn new_with_window_bits(
        level: Compression,
        zlib_header: bool,
        window_bits: u8,
    ) -> Compress {
        assert!(
            window_bits > 8 && window_bits < 16,
            "window_bits must be within 9 ..= 15"
        );
        Compress {
            inner: Deflate::make(level, zlib_header, window_bits),
        }
    }

    /// Creates a new object ready for compressing data that it's given.
    ///
    /// The `level` argument here indicates what level of compression is going
    /// to be performed.
    ///
    /// The Compress object produced by this constructor outputs gzip headers
    /// for the compressed data.
    ///
    /// # Panics
    ///
    /// If `window_bits` does not fall into the range 9 ..= 15,
    /// `new_gzip` will panic.
    #[cfg(feature = "any_zlib")]
    pub fn new_gzip(level: Compression, window_bits: u8) -> Compress {
        assert!(
            window_bits > 8 && window_bits < 16,
            "window_bits must be within 9 ..= 15"
        );
        Compress {
            inner: Deflate::make(level, true, window_bits + 16),
        }
    }

    /// Returns the total number of input bytes which have been processed by
    /// this compression object.
    pub fn total_in(&self) -> u64 {
        self.inner.total_in()
    }

    /// Returns the total number of output bytes which have been produced by
    /// this compression object.
    pub fn total_out(&self) -> u64 {
        self.inner.total_out()
    }

    /// Specifies the compression dictionary to use.
    ///
    /// Returns the Adler-32 checksum of the dictionary.
    #[cfg(feature = "any_zlib")]
    pub fn set_dictionary(&mut self, dictionary: &[u8]) -> Result<u32, CompressError> {
        // SAFETY: The field `inner` must always be accessed as a raw pointer,
        // since it points to a cyclic structure. No copies of `inner` can be
        // retained for longer than the lifetime of `self.inner.inner.stream_wrapper`.
        let stream = self.inner.inner.stream_wrapper.inner;
        let rc = unsafe {
            (*stream).msg = std::ptr::null_mut();
            assert!(dictionary.len() < ffi::uInt::MAX as usize);
            ffi::deflateSetDictionary(stream, dictionary.as_ptr(), dictionary.len() as ffi::uInt)
        };

        match rc {
            ffi::MZ_STREAM_ERROR => compress_failed(self.inner.inner.msg()),
            #[allow(clippy::unnecessary_cast)]
            ffi::MZ_OK => Ok(unsafe { (*stream).adler } as u32),
            c => panic!("unknown return code: {}", c),
        }
    }

    /// Quickly resets this compressor without having to reallocate anything.
    ///
    /// This is equivalent to dropping this object and then creating a new one.
    pub fn reset(&mut self) {
        self.inner.reset();
    }

    /// Dynamically updates the compression level.
    ///
    /// This can be used to switch between compression levels for different
    /// kinds of data, or it can be used in conjunction with a call to reset
    /// to reuse the compressor.
    ///
    /// This may return an error if there wasn't enough output space to complete
    /// the compression of the available input data before changing the
    /// compression level. Flushing the stream before calling this method
    /// ensures that the function will succeed on the first call.
    #[cfg(feature = "any_zlib")]
    pub fn set_level(&mut self, level: Compression) -> Result<(), CompressError> {
        use std::os::raw::c_int;
        // SAFETY: The field `inner` must always be accessed as a raw pointer,
        // since it points to a cyclic structure. No copies of `inner` can be
        // retained for longer than the lifetime of `self.inner.inner.stream_wrapper`.
        let stream = self.inner.inner.stream_wrapper.inner;
        unsafe {
            (*stream).msg = std::ptr::null_mut();
        }
        let rc = unsafe { ffi::deflateParams(stream, level.0 as c_int, ffi::MZ_DEFAULT_STRATEGY) };

        match rc {
            ffi::MZ_OK => Ok(()),
            ffi::MZ_BUF_ERROR => compress_failed(self.inner.inner.msg()),
            c => panic!("unknown return code: {}", c),
        }
    }

    /// Compresses the input data into the output, consuming only as much
    /// input as needed and writing as much output as possible.
    ///
    /// The flush option can be any of the available `FlushCompress` parameters.
    ///
    /// To learn how much data was consumed or how much output was produced, use
    /// the `total_in` and `total_out` functions before/after this is called.
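    ///
    /// # Examples
    ///
    /// A hedged sketch of a one-shot compression into a fixed buffer; the
    /// 64-byte buffer is an assumption that comfortably fits this tiny input:
    ///
    /// ```
    /// use flate2::{Compress, Compression, FlushCompress, Status};
    ///
    /// let mut compressor = Compress::new(Compression::default(), true);
    /// let mut output = [0u8; 64];
    /// let status = compressor
    ///     .compress(b"hello", &mut output, FlushCompress::Finish)
    ///     .unwrap();
    /// assert_eq!(status, Status::StreamEnd);
    ///
    /// let written = compressor.total_out() as usize;
    /// assert!(written > 0 && written <= output.len());
    /// ```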
    pub fn compress(
        &mut self,
        input: &[u8],
        output: &mut [u8],
        flush: FlushCompress,
    ) -> Result<Status, CompressError> {
        self.inner.compress(input, output, flush)
    }

    /// Compresses the input data into the extra space of the output, consuming
    /// only as much input as needed and writing as much output as possible.
    ///
    /// This function has the same semantics as `compress`, except that the
    /// length of `output` is managed by this function. This will not reallocate
    /// the vector provided or attempt to grow it, so space for the output must
    /// be reserved in the output vector by the caller before calling this
    /// function.
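    ///
    /// # Examples
    ///
    /// A minimal sketch; the reserved capacity of 256 bytes is an assumption
    /// that is ample for this input:
    ///
    /// ```
    /// use flate2::{Compress, Compression, FlushCompress};
    ///
    /// let mut compressor = Compress::new(Compression::default(), true);
    /// // Capacity must be reserved up front; `compress_vec` will not grow the vector.
    /// let mut output = Vec::with_capacity(256);
    /// compressor
    ///     .compress_vec(b"hello world", &mut output, FlushCompress::Finish)
    ///     .unwrap();
    /// assert_eq!(output.len() as u64, compressor.total_out());
    /// ```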
    pub fn compress_vec(
        &mut self,
        input: &[u8],
        output: &mut Vec<u8>,
        flush: FlushCompress,
    ) -> Result<Status, CompressError> {
        write_to_spare_capacity_of_vec(output, |out| {
            let before = self.total_out();
            let ret = self.compress(input, out, flush);
            let bytes_written = self.total_out() - before;
            (bytes_written as usize, ret)
        })
    }
}

impl Decompress {
    /// Creates a new object ready for decompressing data that it's given.
    ///
    /// The `zlib_header` argument indicates whether the input data is expected
    /// to have a zlib header or not.
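    ///
    /// # Examples
    ///
    /// A minimal sketch of constructing a decompressor for a raw deflate
    /// stream (no zlib header):
    ///
    /// ```
    /// use flate2::Decompress;
    ///
    /// let decompressor = Decompress::new(false);
    /// assert_eq!(decompressor.total_in(), 0);
    /// assert_eq!(decompressor.total_out(), 0);
    /// ```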
    pub fn new(zlib_header: bool) -> Decompress {
        Decompress {
            inner: Inflate::make(zlib_header, ffi::MZ_DEFAULT_WINDOW_BITS as u8),
        }
    }

    /// Creates a new object ready for decompressing data that it's given.
    ///
    /// The `zlib_header` argument indicates whether the input data is expected
    /// to have a zlib header or not. The `window_bits` parameter indicates the
    /// base-2 logarithm of the sliding window size and must be between 9 and 15.
    ///
    /// # Panics
    ///
    /// If `window_bits` does not fall into the range 9 ..= 15,
    /// `new_with_window_bits` will panic.
    #[cfg(feature = "any_zlib")]
    pub fn new_with_window_bits(zlib_header: bool, window_bits: u8) -> Decompress {
        assert!(
            window_bits > 8 && window_bits < 16,
            "window_bits must be within 9 ..= 15"
        );
        Decompress {
            inner: Inflate::make(zlib_header, window_bits),
        }
    }

    /// Creates a new object ready for decompressing data that it's given.
    ///
    /// The Decompress object produced by this constructor expects gzip headers
    /// for the compressed data.
    ///
    /// # Panics
    ///
    /// If `window_bits` does not fall into the range 9 ..= 15,
    /// `new_gzip` will panic.
    #[cfg(feature = "any_zlib")]
    pub fn new_gzip(window_bits: u8) -> Decompress {
        assert!(
            window_bits > 8 && window_bits < 16,
            "window_bits must be within 9 ..= 15"
        );
        Decompress {
            inner: Inflate::make(true, window_bits + 16),
        }
    }

    /// Returns the total number of input bytes which have been processed by
    /// this decompression object.
    pub fn total_in(&self) -> u64 {
        self.inner.total_in()
    }

    /// Returns the total number of output bytes which have been produced by
    /// this decompression object.
    pub fn total_out(&self) -> u64 {
        self.inner.total_out()
    }

    /// Decompresses the input data into the output, consuming only as much
    /// input as needed and writing as much output as possible.
    ///
    /// The flush option can be any of the available `FlushDecompress` parameters.
    ///
    /// If the first call passes `FlushDecompress::Finish` it is assumed that
    /// the input and output buffers are both sized large enough to decompress
    /// the entire stream in a single call.
    ///
    /// A flush value of `FlushDecompress::Finish` indicates that there are no
    /// more source bytes available beside what's already in the input buffer,
    /// and the output buffer is large enough to hold the rest of the
    /// decompressed data.
    ///
    /// To learn how much data was consumed or how much output was produced, use
    /// the `total_in` and `total_out` functions before/after this is called.
    ///
    /// # Errors
    ///
    /// If the input data to this instance of `Decompress` is not a valid
    /// zlib/deflate stream then this function may return an instance of
    /// `DecompressError` to indicate that the stream of input bytes is corrupted.
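    ///
    /// # Examples
    ///
    /// A round-trip sketch; the zlib stream is produced on the fly here rather
    /// than taken from the crate's documentation, and the 64-byte buffers are
    /// assumptions that fit this tiny payload:
    ///
    /// ```
    /// use flate2::{Compress, Compression, Decompress, FlushCompress, FlushDecompress};
    ///
    /// // Produce a small zlib stream to feed back into the decompressor.
    /// let mut compressor = Compress::new(Compression::default(), true);
    /// let mut compressed = Vec::with_capacity(64);
    /// compressor
    ///     .compress_vec(b"hello", &mut compressed, FlushCompress::Finish)
    ///     .unwrap();
    ///
    /// let mut decompressor = Decompress::new(true);
    /// let mut output = [0u8; 64];
    /// decompressor
    ///     .decompress(&compressed, &mut output, FlushDecompress::Finish)
    ///     .unwrap();
    /// assert_eq!(&output[..decompressor.total_out() as usize], b"hello");
    /// ```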
    pub fn decompress(
        &mut self,
        input: &[u8],
        output: &mut [u8],
        flush: FlushDecompress,
    ) -> Result<Status, DecompressError> {
        self.inner.decompress(input, output, flush)
    }

    /// Decompresses the input data into the extra space in the output vector
    /// specified by `output`.
    ///
    /// This function has the same semantics as `decompress`, except that the
    /// length of `output` is managed by this function. This will not reallocate
    /// the vector provided or attempt to grow it, so space for the output must
    /// be reserved in the output vector by the caller before calling this
    /// function.
    ///
    /// # Errors
    ///
    /// If the input data to this instance of `Decompress` is not a valid
    /// zlib/deflate stream then this function may return an instance of
    /// `DecompressError` to indicate that the stream of input bytes is corrupted.
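    ///
    /// # Examples
    ///
    /// A minimal sketch mirroring the `compress_vec` example above; the
    /// capacity figures are illustrative assumptions:
    ///
    /// ```
    /// use flate2::{Compress, Compression, Decompress, FlushCompress, FlushDecompress};
    ///
    /// let mut compressor = Compress::new(Compression::default(), true);
    /// let mut compressed = Vec::with_capacity(64);
    /// compressor
    ///     .compress_vec(b"hello", &mut compressed, FlushCompress::Finish)
    ///     .unwrap();
    ///
    /// let mut decompressor = Decompress::new(true);
    /// // Capacity must be reserved up front; `decompress_vec` will not grow the vector.
    /// let mut decoded = Vec::with_capacity(64);
    /// decompressor
    ///     .decompress_vec(&compressed, &mut decoded, FlushDecompress::Finish)
    ///     .unwrap();
    /// assert_eq!(decoded, b"hello");
    /// ```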
    pub fn decompress_vec(
        &mut self,
        input: &[u8],
        output: &mut Vec<u8>,
        flush: FlushDecompress,
    ) -> Result<Status, DecompressError> {
        write_to_spare_capacity_of_vec(output, |out| {
            let before = self.total_out();
            let ret = self.decompress(input, out, flush);
            let bytes_written = self.total_out() - before;
            (bytes_written as usize, ret)
        })
    }

    /// Specifies the decompression dictionary to use.
    #[cfg(feature = "any_zlib")]
    pub fn set_dictionary(&mut self, dictionary: &[u8]) -> Result<u32, DecompressError> {
        // SAFETY: The field `inner` must always be accessed as a raw pointer,
        // since it points to a cyclic structure. No copies of `inner` can be
        // retained for longer than the lifetime of `self.inner.inner.stream_wrapper`.
        let stream = self.inner.inner.stream_wrapper.inner;
        let rc = unsafe {
            (*stream).msg = std::ptr::null_mut();
            assert!(dictionary.len() < ffi::uInt::MAX as usize);
            ffi::inflateSetDictionary(stream, dictionary.as_ptr(), dictionary.len() as ffi::uInt)
        };

        #[allow(clippy::unnecessary_cast)]
        match rc {
            ffi::MZ_STREAM_ERROR => decompress_failed(self.inner.inner.msg()),
            ffi::MZ_DATA_ERROR => decompress_need_dict(unsafe { (*stream).adler } as u32),
            ffi::MZ_OK => Ok(unsafe { (*stream).adler } as u32),
            c => panic!("unknown return code: {}", c),
        }
    }

    /// Performs the equivalent of replacing this decompression state with a
    /// freshly allocated copy.
    ///
    /// This function may not need to allocate memory, though, and will attempt
    /// to reuse any previously existing resources.
    ///
    /// The argument provided here indicates whether the reset state will
    /// attempt to decode a zlib header first or not.
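    ///
    /// # Examples
    ///
    /// A short sketch of reusing one decompressor for two different stream
    /// formats (zlib first, then raw deflate):
    ///
    /// ```
    /// use flate2::Decompress;
    ///
    /// let mut decompressor = Decompress::new(true);
    /// // ... decompress a zlib stream here ...
    ///
    /// // Reuse the same object for a raw deflate stream next.
    /// decompressor.reset(false);
    /// ```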
    pub fn reset(&mut self, zlib_header: bool) {
        self.inner.reset(zlib_header);
    }
}

impl Error for DecompressError {}

impl DecompressError {
    /// Retrieve the implementation's message about why the operation failed, if one exists.
    pub fn message(&self) -> Option<&str> {
        match &self.0 {
            DecompressErrorInner::General { msg } => msg.get(),
            _ => None,
        }
    }
}

impl From<DecompressError> for io::Error {
    fn from(data: DecompressError) -> io::Error {
        io::Error::new(io::ErrorKind::Other, data)
    }
}

impl fmt::Display for DecompressError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let msg = match &self.0 {
            DecompressErrorInner::General { msg } => msg.get(),
            DecompressErrorInner::NeedsDictionary { .. } => Some("requires a dictionary"),
        };
        match msg {
            Some(msg) => write!(f, "deflate decompression error: {}", msg),
            None => write!(f, "deflate decompression error"),
        }
    }
}

impl Error for CompressError {}

impl CompressError {
    /// Retrieve the implementation's message about why the operation failed, if one exists.
    pub fn message(&self) -> Option<&str> {
        self.msg.get()
    }
}

impl From<CompressError> for io::Error {
    fn from(data: CompressError) -> io::Error {
        io::Error::new(io::ErrorKind::Other, data)
    }
}

impl fmt::Display for CompressError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.msg.get() {
            Some(msg) => write!(f, "deflate compression error: {}", msg),
            None => write!(f, "deflate compression error"),
        }
    }
}

/// Allows `writer` to write data into the spare capacity of the `output` vector.
/// This will not reallocate the vector provided or attempt to grow it, so space
/// for the `output` must be reserved by the caller before calling this
/// function.
///
/// `writer` needs to return the number of bytes written (and can also return
/// another arbitrary return value).
fn write_to_spare_capacity_of_vec<T>(
    output: &mut Vec<u8>,
    writer: impl FnOnce(&mut [u8]) -> (usize, T),
) -> T {
    let cap = output.capacity();
    let len = output.len();

    output.resize(output.capacity(), 0);
    let (bytes_written, ret) = writer(&mut output[len..]);

    let new_len = core::cmp::min(len + bytes_written, cap); // Sanitizes `bytes_written`.
    output.resize(new_len, 0 /* unused */);

    ret
}

#[cfg(test)]
mod tests {
    use std::io::Write;

    use crate::write;
    use crate::{Compression, Decompress, FlushDecompress};

    #[cfg(feature = "any_zlib")]
    use crate::{Compress, FlushCompress};

    #[test]
    fn issue51() {
        let data = vec![
            0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xb3, 0xc9, 0x28, 0xc9,
            0xcd, 0xb1, 0xe3, 0xe5, 0xb2, 0xc9, 0x48, 0x4d, 0x4c, 0xb1, 0xb3, 0x29, 0xc9, 0x2c,
            0xc9, 0x49, 0xb5, 0x33, 0x31, 0x30, 0x51, 0xf0, 0xcb, 0x2f, 0x51, 0x70, 0xcb, 0x2f,
            0xcd, 0x4b, 0xb1, 0xd1, 0x87, 0x08, 0xda, 0xe8, 0x83, 0x95, 0x00, 0x95, 0x26, 0xe5,
            0xa7, 0x54, 0x2a, 0x24, 0xa5, 0x27, 0xe7, 0xe7, 0xe4, 0x17, 0xd9, 0x2a, 0x95, 0x67,
            0x64, 0x96, 0xa4, 0x2a, 0x81, 0x8c, 0x48, 0x4e, 0xcd, 0x2b, 0x49, 0x2d, 0xb2, 0xb3,
            0xc9, 0x30, 0x44, 0x37, 0x01, 0x28, 0x62, 0xa3, 0x0f, 0x95, 0x06, 0xd9, 0x05, 0x54,
            0x04, 0xe5, 0xe5, 0xa5, 0x67, 0xe6, 0x55, 0xe8, 0x1b, 0xea, 0x99, 0xe9, 0x19, 0x21,
            0xab, 0xd0, 0x07, 0xd9, 0x01, 0x32, 0x53, 0x1f, 0xea, 0x3e, 0x00, 0x94, 0x85, 0xeb,
            0xe4, 0xa8, 0x00, 0x00, 0x00,
        ];

        let mut decoded = Vec::with_capacity(data.len() * 2);

        let mut d = Decompress::new(false);
        // decompress the whole deflate stream (the first 10 bytes are a gzip header)
        assert!(d
            .decompress_vec(&data[10..], &mut decoded, FlushDecompress::Finish)
            .is_ok());

        // decompress data that has nothing to do with the deflate stream (this
        // used to panic)
        drop(d.decompress_vec(&[0], &mut decoded, FlushDecompress::None));
    }

    #[test]
    fn reset() {
        let string = "hello world".as_bytes();
        let mut zlib = Vec::new();
        let mut deflate = Vec::new();

        let comp = Compression::default();
        write::ZlibEncoder::new(&mut zlib, comp)
            .write_all(string)
            .unwrap();
        write::DeflateEncoder::new(&mut deflate, comp)
            .write_all(string)
            .unwrap();

        let mut dst = [0; 1024];
        let mut decoder = Decompress::new(true);
        decoder
            .decompress(&zlib, &mut dst, FlushDecompress::Finish)
            .unwrap();
        assert_eq!(decoder.total_out(), string.len() as u64);
        assert!(dst.starts_with(string));

        decoder.reset(false);
        decoder
            .decompress(&deflate, &mut dst, FlushDecompress::Finish)
            .unwrap();
        assert_eq!(decoder.total_out(), string.len() as u64);
        assert!(dst.starts_with(string));
    }

    #[cfg(feature = "any_zlib")]
    #[test]
    fn set_dictionary_with_zlib_header() {
        let string = "hello, hello!".as_bytes();
        let dictionary = "hello".as_bytes();

        let mut encoded = Vec::with_capacity(1024);

        let mut encoder = Compress::new(Compression::default(), true);

        let dictionary_adler = encoder.set_dictionary(&dictionary).unwrap();

        encoder
            .compress_vec(string, &mut encoded, FlushCompress::Finish)
            .unwrap();

        assert_eq!(encoder.total_in(), string.len() as u64);
        assert_eq!(encoder.total_out(), encoded.len() as u64);

        let mut decoder = Decompress::new(true);
        let mut decoded = [0; 1024];
        let decompress_error = decoder
            .decompress(&encoded, &mut decoded, FlushDecompress::Finish)
            .expect_err("decompression should fail due to requiring a dictionary");

        let required_adler = decompress_error.needs_dictionary()
            .expect("the first call to decompress should indicate a dictionary is required along with the required Adler-32 checksum");

        assert_eq!(required_adler, dictionary_adler,
            "the Adler-32 checksum should match the value when the dictionary was set on the compressor");

        let actual_adler = decoder.set_dictionary(&dictionary).unwrap();

        assert_eq!(required_adler, actual_adler);

        // Decompress the rest of the input to the remainder of the output buffer
        let total_in = decoder.total_in();
        let total_out = decoder.total_out();

        let decompress_result = decoder.decompress(
            &encoded[total_in as usize..],
            &mut decoded[total_out as usize..],
            FlushDecompress::Finish,
        );
        assert!(decompress_result.is_ok());

        assert_eq!(&decoded[..decoder.total_out() as usize], string);
    }

    #[cfg(feature = "any_zlib")]
    #[test]
    fn set_dictionary_raw() {
        let string = "hello, hello!".as_bytes();
        let dictionary = "hello".as_bytes();

        let mut encoded = Vec::with_capacity(1024);

        let mut encoder = Compress::new(Compression::default(), false);

        encoder.set_dictionary(&dictionary).unwrap();

        encoder
            .compress_vec(string, &mut encoded, FlushCompress::Finish)
            .unwrap();

        assert_eq!(encoder.total_in(), string.len() as u64);
        assert_eq!(encoder.total_out(), encoded.len() as u64);

        let mut decoder = Decompress::new(false);

        decoder.set_dictionary(&dictionary).unwrap();

        let mut decoded = [0; 1024];
        let decompress_result = decoder.decompress(&encoded, &mut decoded, FlushDecompress::Finish);

        assert!(decompress_result.is_ok());

        assert_eq!(&decoded[..decoder.total_out() as usize], string);
    }

    #[cfg(feature = "any_zlib")]
    #[test]
    fn test_gzip_flate() {
        let string = "hello, hello!".as_bytes();

        let mut encoded = Vec::with_capacity(1024);

        let mut encoder = Compress::new_gzip(Compression::default(), 9);

        encoder
            .compress_vec(string, &mut encoded, FlushCompress::Finish)
            .unwrap();

        assert_eq!(encoder.total_in(), string.len() as u64);
        assert_eq!(encoder.total_out(), encoded.len() as u64);

        let mut decoder = Decompress::new_gzip(9);

        let mut decoded = [0; 1024];
        decoder
            .decompress(&encoded, &mut decoded, FlushDecompress::Finish)
            .unwrap();

        assert_eq!(&decoded[..decoder.total_out() as usize], string);
    }

    #[cfg(feature = "any_zlib")]
    #[test]
    fn test_error_message() {
        let mut decoder = Decompress::new(false);
        let mut decoded = [0; 128];
        let garbage = b"xbvxzi";

        let err = decoder
            .decompress(&*garbage, &mut decoded, FlushDecompress::Finish)
            .unwrap_err();

        assert_eq!(err.message(), Some("invalid stored block lengths"));
    }
    }
}