zlib-0.7.1.1/0000755000000000000000000000000007346545000011007 5ustar0000000000000000zlib-0.7.1.1/Codec/Compression/0000755000000000000000000000000007346545000014325 5ustar0000000000000000zlib-0.7.1.1/Codec/Compression/GZip.hs0000644000000000000000000000736507346545000015545 0ustar0000000000000000----------------------------------------------------------------------------- -- | -- Copyright : (c) 2006-2014 Duncan Coutts -- License : BSD-style -- -- Maintainer : duncan@community.haskell.org -- -- Compression and decompression of data streams in the gzip format. -- -- The format is described in detail in RFC #1952: -- -- -- See also the zlib home page: -- ----------------------------------------------------------------------------- module Codec.Compression.GZip ( -- | This module provides pure functions for compressing and decompressing -- streams of data in the gzip format and represented by lazy 'ByteString's. -- This makes it easy to use either in memory or with disk or network IO. -- -- For example a simple gzip compression program is just: -- -- > import qualified Data.ByteString.Lazy as ByteString -- > import qualified Codec.Compression.GZip as GZip -- > -- > main = ByteString.interact GZip.compress -- -- Or you could lazily read in and decompress a @.gz@ file using: -- -- > content <- fmap GZip.decompress (readFile file) -- -- * Simple compression and decompression compress, decompress, DecompressError(..), -- * Extended API with control over compression parameters compressWith, decompressWith, CompressParams(..), defaultCompressParams, DecompressParams(..), defaultDecompressParams, -- ** The compression parameter types CompressionLevel(..), defaultCompression, noCompression, bestSpeed, bestCompression, compressionLevel, Method, deflateMethod, WindowBits(..), defaultWindowBits, windowBits, MemoryLevel(..), defaultMemoryLevel, minMemoryLevel, maxMemoryLevel, memoryLevel, CompressionStrategy, defaultStrategy, filteredStrategy, huffmanOnlyStrategy, rleStrategy, 
fixedStrategy, ) where import Data.ByteString.Lazy (ByteString) import qualified Codec.Compression.Zlib.Internal as Internal import Codec.Compression.Zlib.Internal hiding (compress, decompress) -- | Decompress a stream of data in the gzip format, -- throw 'DecompressError' on failure. -- -- Note that the decompression is performed /lazily/. Errors in the data stream -- may not be detected until the end of the stream is demanded (since it is -- only at the end that the final checksum can be checked). If this is -- important to you, you must make sure to consume the whole decompressed -- stream before doing any IO action that depends on it. -- decompress :: ByteString -> ByteString decompress = decompressWith defaultDecompressParams -- | Like 'Codec.Compression.Gzip.decompress' but with the ability to specify various decompression -- parameters. Typical usage: -- -- > decompressWith defaultCompressParams { ... } -- decompressWith :: DecompressParams -> ByteString -> ByteString decompressWith = Internal.decompress gzipFormat -- | Compress a stream of data into the gzip format. -- -- This uses the default compression parameters. In particular it uses the -- default compression level which favours a higher compression ratio over -- compression speed, though it does not use the maximum compression level. -- -- Use 'compressWith' to adjust the compression level or other compression -- parameters. -- compress :: ByteString -> ByteString compress = compressWith defaultCompressParams -- | Like 'Codec.Compression.Gzip.compress' but with the ability to specify various compression -- parameters. Typical usage: -- -- > compressWith defaultCompressParams { ... 
} -- -- In particular you can set the compression level: -- -- > compressWith defaultCompressParams { compressLevel = BestCompression } -- compressWith :: CompressParams -> ByteString -> ByteString compressWith = Internal.compress gzipFormat zlib-0.7.1.1/Codec/Compression/Zlib.hs0000644000000000000000000000657407346545000015575 0ustar0000000000000000----------------------------------------------------------------------------- -- | -- Copyright : (c) 2006-2014 Duncan Coutts -- License : BSD-style -- -- Maintainer : duncan@community.haskell.org -- -- Compression and decompression of data streams in the zlib format. -- -- The format is described in detail in RFC #1950: -- -- -- See also the zlib home page: -- ----------------------------------------------------------------------------- module Codec.Compression.Zlib ( -- | This module provides pure functions for compressing and decompressing -- streams of data in the zlib format and represented by lazy 'ByteString's. -- This makes it easy to use either in memory or with disk or network IO. -- * Simple compression and decompression compress, decompress, DecompressError(..), -- * Extended API with control over compression parameters compressWith, decompressWith, CompressParams(..), defaultCompressParams, DecompressParams(..), defaultDecompressParams, -- ** The compression parameter types CompressionLevel(..), defaultCompression, noCompression, bestSpeed, bestCompression, compressionLevel, Method, deflateMethod, WindowBits(..), defaultWindowBits, windowBits, MemoryLevel(..), defaultMemoryLevel, minMemoryLevel, maxMemoryLevel, memoryLevel, CompressionStrategy, defaultStrategy, filteredStrategy, huffmanOnlyStrategy, rleStrategy, fixedStrategy, ) where import Data.ByteString.Lazy (ByteString) import qualified Codec.Compression.Zlib.Internal as Internal import Codec.Compression.Zlib.Internal hiding (compress, decompress) -- | Decompress a stream of data in the zlib format, -- throw 'DecompressError' on failure. 
-- -- Note that the decompression is performed /lazily/. Errors in the data stream -- may not be detected until the end of the stream is demanded (since it is -- only at the end that the final checksum can be checked). If this is -- important to you, you must make sure to consume the whole decompressed -- stream before doing any IO action that depends on it. -- decompress :: ByteString -> ByteString decompress = decompressWith defaultDecompressParams -- | Like 'Codec.Compression.Zlib.decompress' but with the ability to specify various decompression -- parameters. Typical usage: -- -- > decompressWith defaultCompressParams { ... } -- decompressWith :: DecompressParams -> ByteString -> ByteString decompressWith = Internal.decompress zlibFormat -- | Compress a stream of data into the zlib format. -- -- This uses the default compression parameters. In particular it uses the -- default compression level which favours a higher compression ratio over -- compression speed, though it does not use the maximum compression level. -- -- Use 'compressWith' to adjust the compression level or other compression -- parameters. -- compress :: ByteString -> ByteString compress = compressWith defaultCompressParams -- | Like 'Codec.Compression.Zlib.compress' but with the ability to specify various compression -- parameters. Typical usage: -- -- > compressWith defaultCompressParams { ... 
} -- -- In particular you can set the compression level: -- -- > compressWith defaultCompressParams { compressLevel = BestCompression } -- compressWith :: CompressParams -> ByteString -> ByteString compressWith = Internal.compress zlibFormat zlib-0.7.1.1/Codec/Compression/Zlib/0000755000000000000000000000000007346545000015225 5ustar0000000000000000zlib-0.7.1.1/Codec/Compression/Zlib/ByteStringCompat.hs0000644000000000000000000000336007346545000021021 0ustar0000000000000000-- Lifted from the text package, with light editing: -- Data.Text.Internal.ByteStringCompat {-# LANGUAGE BangPatterns #-} {-# LANGUAGE CPP #-} {-# LANGUAGE MagicHash #-} module Codec.Compression.Zlib.ByteStringCompat (mkBS, withBS) where import Data.ByteString.Internal (ByteString (..)) import Data.Word (Word8) import Foreign.ForeignPtr (ForeignPtr) #if MIN_VERSION_base(4,10,0) import GHC.ForeignPtr (plusForeignPtr) #else import GHC.ForeignPtr (ForeignPtr(ForeignPtr)) import GHC.Exts (Int (..), plusAddr#) #endif mkBS :: ForeignPtr Word8 -> Int -> Int -> ByteString #if MIN_VERSION_bytestring(0,11,0) mkBS dfp o n = BS (plusForeignPtr dfp o) n #else mkBS dfp o n = PS dfp o n #endif {-# INLINE mkBS #-} withBS :: ByteString -> (ForeignPtr Word8 -> Int -> r) -> r #if MIN_VERSION_bytestring(0,11,0) withBS (BS !sfp !slen) kont = kont sfp slen #else withBS (PS !sfp !soff !slen) kont = kont (plusForeignPtr sfp soff) slen #endif {-# INLINE withBS #-} #if !MIN_VERSION_base(4,10,0) -- |Advances the given address by the given offset in bytes. -- -- The new 'ForeignPtr' shares the finalizer of the original, -- equivalent from a finalization standpoint to just creating another -- reference to the original. That is, the finalizer will not be -- called before the new 'ForeignPtr' is unreachable, nor will it be -- called an additional time due to this call, and the finalizer will -- be called with the same address that it would have had this call -- not happened, *not* the new address. 
plusForeignPtr :: ForeignPtr a -> Int -> ForeignPtr b plusForeignPtr (ForeignPtr addr guts) (I# offset) = ForeignPtr (plusAddr# addr offset) guts {-# INLINE [0] plusForeignPtr #-} {-# RULES "ByteString plusForeignPtr/0" forall fp . plusForeignPtr fp 0 = fp #-} #endif zlib-0.7.1.1/Codec/Compression/Zlib/Internal.hs0000644000000000000000000011602207346545000017337 0ustar0000000000000000{-# LANGUAGE CPP, RankNTypes, BangPatterns #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE Trustworthy #-} ----------------------------------------------------------------------------- -- | -- Copyright : (c) 2006-2015 Duncan Coutts -- License : BSD-style -- -- Maintainer : duncan@community.haskell.org -- -- Pure and IO stream based interfaces to lower level zlib wrapper -- ----------------------------------------------------------------------------- module Codec.Compression.Zlib.Internal ( -- * Pure interface compress, decompress, -- * Monadic incremental interface -- $incremental-compression -- ** Using incremental compression -- $using-incremental-compression CompressStream(..), compressST, compressIO, foldCompressStream, foldCompressStreamWithInput, -- ** Using incremental decompression -- $using-incremental-decompression DecompressStream(..), DecompressError(..), decompressST, decompressIO, foldDecompressStream, foldDecompressStreamWithInput, -- * The compression parameter types CompressParams(..), defaultCompressParams, DecompressParams(..), defaultDecompressParams, Stream.Format, Stream.gzipFormat, Stream.zlibFormat, Stream.rawFormat, Stream.gzipOrZlibFormat, Stream.CompressionLevel(..), Stream.defaultCompression, Stream.noCompression, Stream.bestSpeed, Stream.bestCompression, Stream.compressionLevel, Stream.Method, Stream.deflateMethod, Stream.WindowBits(..), Stream.defaultWindowBits, Stream.windowBits, Stream.MemoryLevel(..), Stream.defaultMemoryLevel, Stream.minMemoryLevel, Stream.maxMemoryLevel, Stream.memoryLevel, Stream.CompressionStrategy, Stream.defaultStrategy, 
Stream.filteredStrategy, Stream.huffmanOnlyStrategy, Stream.rleStrategy, Stream.fixedStrategy, ) where import Prelude hiding (length) import Control.Monad (when) import Control.Exception (Exception, throw, assert) import Control.Monad.ST.Lazy hiding (stToIO) import Control.Monad.ST.Strict (stToIO) import qualified Control.Monad.ST.Unsafe as Unsafe (unsafeIOToST) import GHC.Generics (Generic) import Data.Bits (toIntegralSized) import qualified Data.ByteString.Lazy as L import qualified Data.ByteString.Lazy.Internal as L import qualified Data.ByteString as S import qualified Data.ByteString.Internal as S import Data.Maybe (fromMaybe) import Data.Word (Word8) import Foreign.C (CUInt) import GHC.IO (noDuplicate) import qualified Codec.Compression.Zlib.Stream as Stream import Codec.Compression.Zlib.ByteStringCompat (mkBS, withBS) import Codec.Compression.Zlib.Stream (Stream) -- | The full set of parameters for compression. The defaults are -- 'defaultCompressParams'. -- -- The 'compressBufferSize' is the size of the first output buffer containing -- the compressed data. If you know an approximate upper bound on the size of -- the compressed data then setting this parameter can save memory. The default -- compression output buffer size is @16k@. If your estimate is wrong it does -- not matter too much, the default buffer size will be used for the remaining -- chunks. -- data CompressParams = CompressParams { compressLevel :: !Stream.CompressionLevel, compressMethod :: !Stream.Method, compressWindowBits :: !Stream.WindowBits, compressMemoryLevel :: !Stream.MemoryLevel, compressStrategy :: !Stream.CompressionStrategy, compressBufferSize :: !Int, compressDictionary :: Maybe S.ByteString } deriving ( Eq -- ^ @since 0.7.0.0 , Ord -- ^ @since 0.7.0.0 , Show , Generic -- ^ @since 0.7.0.0 ) -- | The full set of parameters for decompression. The defaults are -- 'defaultDecompressParams'. 
-- -- The 'decompressBufferSize' is the size of the first output buffer, -- containing the uncompressed data. If you know an exact or approximate upper -- bound on the size of the decompressed data then setting this parameter can -- save memory. The default decompression output buffer size is @32k@. If your -- estimate is wrong it does not matter too much, the default buffer size will -- be used for the remaining chunks. -- -- One particular use case for setting the 'decompressBufferSize' is if you -- know the exact size of the decompressed data and want to produce a strict -- 'Data.ByteString.ByteString'. The compression and decompression functions -- use lazy 'Data.ByteString.Lazy.ByteString's but if you set the -- 'decompressBufferSize' correctly then you can generate a lazy -- 'Data.ByteString.Lazy.ByteString' with exactly one chunk, which can be -- converted to a strict 'Data.ByteString.ByteString' in @O(1)@ time using -- @'Data.ByteString.concat' . 'Data.ByteString.Lazy.toChunks'@. -- data DecompressParams = DecompressParams { decompressWindowBits :: !Stream.WindowBits, decompressBufferSize :: !Int, decompressDictionary :: Maybe S.ByteString, decompressAllMembers :: Bool } deriving ( Eq -- ^ @since 0.7.0.0 , Ord -- ^ @since 0.7.0.0 , Show , Generic -- ^ @since 0.7.0.0 ) -- | The default set of parameters for compression. This is typically used with -- 'Codec.Compression.GZip.compressWith' or 'Codec.Compression.Zlib.compressWith' -- with specific parameters overridden. -- defaultCompressParams :: CompressParams defaultCompressParams = CompressParams { compressLevel = Stream.defaultCompression, compressMethod = Stream.deflateMethod, compressWindowBits = Stream.defaultWindowBits, compressMemoryLevel = Stream.defaultMemoryLevel, compressStrategy = Stream.defaultStrategy, compressBufferSize = cuint2int defaultCompressBufferSize, compressDictionary = Nothing } -- | The default set of parameters for decompression. 
This is typically used with -- 'Codec.Compression.GZip.decompressWith' or 'Codec.Compression.Zlib.decompressWith' -- with specific parameters overridden. -- defaultDecompressParams :: DecompressParams defaultDecompressParams = DecompressParams { decompressWindowBits = Stream.defaultWindowBits, decompressBufferSize = cuint2int defaultDecompressBufferSize, decompressDictionary = Nothing, decompressAllMembers = True } -- | The default chunk sizes for the output of compression and decompression -- are 16k and 32k respectively (less a small accounting overhead). -- defaultCompressBufferSize, defaultDecompressBufferSize :: CUInt defaultCompressBufferSize = 16 * 1024 - int2cuint L.chunkOverhead defaultDecompressBufferSize = 32 * 1024 - int2cuint L.chunkOverhead -- | The unfolding of the decompression process, where you provide a sequence -- of compressed data chunks as input and receive a sequence of uncompressed -- data chunks as output. The process is incremental, in that the demand for -- input and provision of output are interleaved. -- -- To indicate the end of the input supply an empty input chunk. Note that -- for 'Stream.gzipFormat' with the default 'decompressAllMembers' @True@ you will -- have to do this, as the decompressor will look for any following members. -- With 'decompressAllMembers' @False@ the decompressor knows when the data -- ends and will produce 'DecompressStreamEnd' without you having to supply an -- empty chunk to indicate the end of the input. -- data DecompressStream m = DecompressInputRequired { decompressSupplyInput :: S.ByteString -> m (DecompressStream m) } | DecompressOutputAvailable { decompressOutput :: !S.ByteString, decompressNext :: m (DecompressStream m) } -- | Includes any trailing unconsumed /input/ data. | DecompressStreamEnd { decompressUnconsumedInput :: S.ByteString } -- | An error code | DecompressStreamError { decompressStreamError :: DecompressError } -- | The possible error cases when decompressing a stream. 
-- -- This can be 'show'n to give a human readable error message. -- data DecompressError = -- | The compressed data stream ended prematurely. This may happen if the -- input data stream was truncated. TruncatedInput -- | It is possible to do zlib compression with a custom dictionary. This -- allows slightly higher compression ratios for short files. However such -- compressed streams require the same dictionary when decompressing. This -- error is for when we encounter a compressed stream that needs a -- dictionary, and it's not provided. | DictionaryRequired -- | If the stream requires a dictionary and you provide one with the -- wrong 'Stream.DictionaryHash' then you will get this error. | DictionaryMismatch -- | If the compressed data stream is corrupted in any way then you will -- get this error, for example if the input data just isn't a compressed -- zlib data stream. In particular if the data checksum turns out to be -- wrong then you will get all the decompressed data but this error at the -- end, instead of the normal successful 'Stream.StreamEnd'. | DataFormatError String deriving ( Eq , Ord -- ^ @since 0.7.0.0 , Generic -- ^ @since 0.7.0.0 ) instance Show DecompressError where show TruncatedInput = modprefix "premature end of compressed data stream" show DictionaryRequired = modprefix "compressed data stream requires custom dictionary" show DictionaryMismatch = modprefix "given dictionary does not match the expected one" show (DataFormatError detail) = modprefix ("compressed data stream format error (" ++ detail ++ ")") modprefix :: ShowS modprefix = ("Codec.Compression.Zlib: " ++) instance Exception DecompressError -- | A fold over the 'DecompressStream' in the given monad. -- -- One way to look at this is that it runs the stream, using callback functions -- for the four stream events. 
-- foldDecompressStream :: Monad m => ((S.ByteString -> m a) -> m a) -> (S.ByteString -> m a -> m a) -> (S.ByteString -> m a) -> (DecompressError -> m a) -> DecompressStream m -> m a foldDecompressStream input output end err = fold where fold (DecompressInputRequired next) = input (\x -> next x >>= fold) fold (DecompressOutputAvailable outchunk next) = output outchunk (next >>= fold) fold (DecompressStreamEnd inchunk) = end inchunk fold (DecompressStreamError derr) = err derr -- | A variant on 'foldCompressStream' that is pure rather than operating in a -- monad and where the input is provided by a lazy 'L.ByteString'. So we only -- have to deal with the output, end and error parts, making it like a foldr on -- a list of output chunks. -- -- For example: -- -- > toChunks = foldDecompressStreamWithInput (:) [] throw -- foldDecompressStreamWithInput :: (S.ByteString -> a -> a) -> (L.ByteString -> a) -> (DecompressError -> a) -> (forall s. DecompressStream (ST s)) -> L.ByteString -> a foldDecompressStreamWithInput chunk end err = \s lbs -> runST (fold s (toLimitedChunks lbs)) where fold (DecompressInputRequired next) [] = next S.empty >>= \strm -> fold strm [] fold (DecompressInputRequired next) (inchunk:inchunks) = next inchunk >>= \s -> fold s inchunks fold (DecompressOutputAvailable outchunk next) inchunks = do r <- next >>= \s -> fold s inchunks return $ chunk outchunk r fold (DecompressStreamEnd inchunk) inchunks = return $ end (L.fromChunks (inchunk:inchunks)) fold (DecompressStreamError derr) _ = return $ err derr -- $incremental-compression -- The pure 'Codec.Compression.Zlib.Internal.compress' and -- 'Codec.Compression.Zlib.Internal.decompress' functions are streaming in the sense -- that they can produce output without demanding all input, however they need -- the input data stream as a lazy 'L.ByteString'. Having the input data -- stream as a lazy 'L.ByteString' often requires using lazy I\/O which is not -- appropriate in all circumstances. 
-- -- For these cases an incremental interface is more appropriate. This interface -- allows both incremental input and output. Chunks of input data are supplied -- one by one (e.g. as they are obtained from an input source like a file or -- network source). Output is also produced chunk by chunk. -- -- The incremental input and output is managed via the 'CompressStream' and -- 'DecompressStream' types. They represents the unfolding of the process of -- compressing and decompressing. They operates in either the 'ST' or 'IO' -- monads. They can be lifted into other incremental abstractions like pipes or -- conduits, or they can be used directly in the following style. -- $using-incremental-compression -- -- In a loop: -- -- * Inspect the status of the stream -- -- * When it is 'CompressInputRequired' then you should call the action, -- passing a chunk of input (or 'BS.empty' when no more input is available) -- to get the next state of the stream and continue the loop. -- -- * When it is 'CompressOutputAvailable' then do something with the given -- chunk of output, and call the action to get the next state of the stream -- and continue the loop. -- -- * When it is 'CompressStreamEnd' then terminate the loop. -- -- Note that you cannot stop as soon as you have no more input, you need to -- carry on until all the output has been collected, i.e. until you get to -- 'CompressStreamEnd'. -- -- Here is an example where we get input from one file handle and send the -- compressed output to another file handle. 
-- -- > go :: Handle -> Handle -> CompressStream IO -> IO () -- > go inh outh (CompressInputRequired next) = do -- > inchunk <- BS.hGet inh 4096 -- > go inh outh =<< next inchunk -- > go inh outh (CompressOutputAvailable outchunk next) = -- > BS.hPut outh outchunk -- > go inh outh =<< next -- > go _ _ CompressStreamEnd = return () -- -- The same can be achieved with 'foldCompressStream': -- -- > foldCompressStream -- > (\next -> do inchunk <- BS.hGet inh 4096; next inchunk) -- > (\outchunk next -> do BS.hPut outh outchunk; next) -- > (return ()) -- $using-incremental-decompression -- -- The use of 'DecompressStream' is very similar to 'CompressStream' but with -- a few differences: -- -- * There is the extra possibility of a 'DecompressStreamError' -- -- * There can be extra trailing data after a compressed stream, and the -- 'DecompressStreamEnd' includes that. -- -- Otherwise the same loop style applies, and there are fold functions. -- | The unfolding of the compression process, where you provide a sequence -- of uncompressed data chunks as input and receive a sequence of compressed -- data chunks as output. The process is incremental, in that the demand for -- input and provision of output are interleaved. -- data CompressStream m = CompressInputRequired { compressSupplyInput :: S.ByteString -> m (CompressStream m) } | CompressOutputAvailable { compressOutput :: !S.ByteString, compressNext :: m (CompressStream m) } | CompressStreamEnd -- | A fold over the 'CompressStream' in the given monad. -- -- One way to look at this is that it runs the stream, using callback functions -- for the three stream events. 
-- foldCompressStream :: Monad m => ((S.ByteString -> m a) -> m a) -> (S.ByteString -> m a -> m a) -> m a -> CompressStream m -> m a foldCompressStream input output end = fold where fold (CompressInputRequired next) = input (\x -> next x >>= fold) fold (CompressOutputAvailable outchunk next) = output outchunk (next >>= fold) fold CompressStreamEnd = end -- | A variant on 'foldCompressStream' that is pure rather than operating in a -- monad and where the input is provided by a lazy 'L.ByteString'. So we only -- have to deal with the output and end parts, making it just like a foldr on a -- list of output chunks. -- -- For example: -- -- > toChunks = foldCompressStreamWithInput (:) [] -- foldCompressStreamWithInput :: (S.ByteString -> a -> a) -> a -> (forall s. CompressStream (ST s)) -> L.ByteString -> a foldCompressStreamWithInput chunk end = \s lbs -> runST (fold s (toLimitedChunks lbs)) where fold (CompressInputRequired next) [] = next S.empty >>= \strm -> fold strm [] fold (CompressInputRequired next) (inchunk:inchunks) = next inchunk >>= \s -> fold s inchunks fold (CompressOutputAvailable outchunk next) inchunks = do r <- next >>= \s -> fold s inchunks return $ chunk outchunk r fold CompressStreamEnd _inchunks = return end -- | Compress a data stream provided as a lazy 'L.ByteString'. -- -- There are no expected error conditions. All input data streams are valid. It -- is possible for unexpected errors to occur, such as running out of memory, -- or finding the wrong version of the zlib C library, these are thrown as -- exceptions. -- compress :: Stream.Format -> CompressParams -> L.ByteString -> L.ByteString -- | Incremental compression in the 'ST' monad. Using 'ST' makes it possible -- to write pure /lazy/ functions while making use of incremental compression. -- -- Chunk size must fit into t'CUInt'. compressST :: Stream.Format -> CompressParams -> CompressStream (ST s) -- | Incremental compression in the 'IO' monad. -- -- Chunk size must fit into t'CUInt'. 
compressIO :: Stream.Format -> CompressParams -> CompressStream IO compress format params = foldCompressStreamWithInput L.Chunk L.Empty (compressStreamST format params) compressST format params = compressStreamST format params compressIO format params = compressStreamIO format params -- | Chunk size must fit into t'CUInt'. compressStream :: Stream.Format -> CompressParams -> S.ByteString -> Stream (CompressStream Stream) compressStream format (CompressParams compLevel method bits memLevel strategy initChunkSize mdict) = \chunk -> do Stream.deflateInit format compLevel method bits memLevel strategy setDictionary mdict withBS chunk $ \inFPtr length -> if length == 0 then fillBuffers 20 --gzip header is 20 bytes, others even smaller else do Stream.pushInputBuffer inFPtr 0 (int2cuint length) fillBuffers (int2cuint_capped initChunkSize) where -- we flick between two states: -- * where one or other buffer is empty -- - in which case we refill one or both -- * where both buffers are non-empty -- - in which case we compress until a buffer is empty fillBuffers :: CUInt -> Stream (CompressStream Stream) fillBuffers outChunkSize = do #ifdef DEBUG Stream.consistencyCheck #endif -- in this state there are two possibilities: -- * no output buffer space is available -- - in which case we must make more available -- * no input buffer is available -- - in which case we must supply more inputBufferEmpty <- Stream.inputBufferEmpty outputBufferFull <- Stream.outputBufferFull assert (inputBufferEmpty || outputBufferFull) $ return () when outputBufferFull $ do outFPtr <- Stream.unsafeLiftIO (S.mallocByteString (cuint2int outChunkSize)) Stream.pushOutputBuffer outFPtr 0 outChunkSize if inputBufferEmpty then return $ CompressInputRequired $ flip withBS $ \inFPtr length -> if length == 0 then drainBuffers True else do Stream.pushInputBuffer inFPtr 0 (int2cuint length) drainBuffers False else drainBuffers False drainBuffers :: Bool -> Stream (CompressStream Stream) drainBuffers lastChunk = 
do inputBufferEmpty' <- Stream.inputBufferEmpty outputBufferFull' <- Stream.outputBufferFull assert(not outputBufferFull' && (lastChunk || not inputBufferEmpty')) $ return () -- this invariant guarantees we can always make forward progress -- and that therefore a BufferError is impossible let flush = if lastChunk then Stream.Finish else Stream.NoFlush status <- Stream.deflate flush case status of Stream.Ok -> do outputBufferFull <- Stream.outputBufferFull if outputBufferFull then do (outFPtr, offset, length) <- Stream.popOutputBuffer let chunk = mkBS outFPtr offset length return $ CompressOutputAvailable chunk $ do fillBuffers defaultCompressBufferSize else do fillBuffers defaultCompressBufferSize Stream.StreamEnd -> do inputBufferEmpty <- Stream.inputBufferEmpty assert inputBufferEmpty $ return () outputBufferBytesAvailable <- Stream.outputBufferBytesAvailable if outputBufferBytesAvailable > 0 then do (outFPtr, offset, length) <- Stream.popOutputBuffer let chunk = mkBS outFPtr offset length Stream.finalise return $ CompressOutputAvailable chunk (return CompressStreamEnd) else do Stream.finalise return CompressStreamEnd Stream.Error code msg -> case code of Stream.BufferError -> fail "BufferError should be impossible!" Stream.NeedDict _ -> fail "NeedDict is impossible!" _ -> fail msg -- Set the custom dictionary, if we were provided with one -- and if the format supports it (zlib and raw, not gzip). setDictionary :: Maybe S.ByteString -> Stream () setDictionary (Just dict) | Stream.formatSupportsDictionary format = case int2cuint_safe (S.length dict) of Nothing -> fail "error when setting deflate dictionary, its length does not fit into CUInt" Just{} -> do status <- Stream.deflateSetDictionary dict case status of Stream.Ok -> return () Stream.Error _ msg -> fail msg _ -> fail "error when setting deflate dictionary" setDictionary _ = return () -- | Decompress a data stream provided as a lazy 'L.ByteString'. 
-- -- It will throw an exception if any error is encountered in the input data. -- If you need more control over error handling then use one the incremental -- versions, 'decompressST' or 'decompressIO'. -- decompress :: Stream.Format -> DecompressParams -> L.ByteString -> L.ByteString -- | Incremental decompression in the 'ST' monad. Using 'ST' makes it possible -- to write pure /lazy/ functions while making use of incremental decompression. -- -- Chunk size must fit into t'CUInt'. decompressST :: Stream.Format -> DecompressParams -> DecompressStream (ST s) -- | Incremental decompression in the 'IO' monad. -- -- Chunk size must fit into t'CUInt'. decompressIO :: Stream.Format -> DecompressParams -> DecompressStream IO decompress format params = foldDecompressStreamWithInput L.Chunk (const L.Empty) throw (decompressStreamST format params) decompressST format params = decompressStreamST format params decompressIO format params = decompressStreamIO format params -- | Chunk size must fit into t'CUInt'. 
-- | The core incremental decompression state machine, parameterised over the
-- container 'Stream.Format'.
--
-- The 'Bool' argument says whether we are /resuming/ decompression of a
-- subsequent gzip member on an already-initialised zlib stream ('True') or
-- starting afresh ('False').  Resuming is only legal for 'Stream.gzipFormat'
-- with the all-members option enabled (asserted below).
--
-- Chunk size must fit into t'CUInt'.
decompressStream :: Stream.Format -> DecompressParams -> Bool
                 -> S.ByteString -> Stream (DecompressStream Stream)
decompressStream format (DecompressParams bits initChunkSize mdict allMembers)
                 resume =
  \chunk -> do
    inputBufferEmpty <- Stream.inputBufferEmpty
    outputBufferFull <- Stream.outputBufferFull
    -- On entry no input may be pending; either reset the existing inflate
    -- state (resume) or initialise a brand new one.
    assert inputBufferEmpty $
      if resume
        then assert (format == Stream.gzipFormat && allMembers) $
             Stream.inflateReset
        else assert outputBufferFull $
             Stream.inflateInit format bits
    withBS chunk $ \inFPtr length ->
      if length == 0
        then do
          -- special case to avoid demanding more input again
          -- always an error anyway
          -- (push a 1-byte dummy output buffer so drainBuffers' invariant
          -- "output buffer not full" holds)
          when outputBufferFull $ do
            outFPtr <- Stream.unsafeLiftIO (S.mallocByteString 1)
            Stream.pushOutputBuffer outFPtr 0 1
          drainBuffers True
        else do
          Stream.pushInputBuffer inFPtr 0 (int2cuint length)
          -- Normally we start with no output buffer (so counts as full) but
          -- if we're resuming then we'll usually still have output buffer
          -- space available
          assert (if not resume then outputBufferFull else True) $ return ()
          if outputBufferFull
            then fillBuffers (int2cuint_capped initChunkSize)
            else drainBuffers False

  where
    -- we flick between two states:
    --   * where one or other buffer is empty
    --       - in which case we refill one or both
    --   * where both buffers are non-empty
    --       - in which case we compress until a buffer is empty
    fillBuffers :: CUInt -> Stream (DecompressStream Stream)
    fillBuffers outChunkSize = do
#ifdef DEBUG
      Stream.consistencyCheck
#endif
      -- in this state there are two possibilities:
      --   * no output buffer space is available
      --       - in which case we must make more available
      --   * no input buffer is available
      --       - in which case we must supply more
      inputBufferEmpty <- Stream.inputBufferEmpty
      outputBufferFull <- Stream.outputBufferFull
      assert (inputBufferEmpty || outputBufferFull) $ return ()

      when outputBufferFull $ do
        outFPtr <- Stream.unsafeLiftIO (S.mallocByteString (cuint2int outChunkSize))
        Stream.pushOutputBuffer outFPtr 0 outChunkSize

      -- Yield to the caller for more input; an empty chunk signals EOF.
      if inputBufferEmpty
        then return $ DecompressInputRequired $ \chunk ->
               withBS chunk $ \inFPtr length ->
                 if length == 0
                   then drainBuffers True
                   else do
                     Stream.pushInputBuffer inFPtr 0 (int2cuint length)
                     drainBuffers False
        else drainBuffers False

    drainBuffers :: Bool -> Stream (DecompressStream Stream)
    drainBuffers lastChunk = do

      inputBufferEmpty' <- Stream.inputBufferEmpty
      outputBufferFull' <- Stream.outputBufferFull
      assert(not outputBufferFull'
         && (lastChunk || not inputBufferEmpty')) $ return ()
      -- this invariant guarantees we can always make forward progress or at
      -- least if a BufferError does occur that it must be due to a premature EOF

      status <- Stream.inflate Stream.NoFlush

      case status of
        Stream.Ok -> do
          outputBufferFull <- Stream.outputBufferFull
          if outputBufferFull
            then do (outFPtr, offset, length) <- Stream.popOutputBuffer
                    let chunk = mkBS outFPtr offset length
                    return $ DecompressOutputAvailable chunk $ do
                      fillBuffers defaultDecompressBufferSize
            else do fillBuffers defaultDecompressBufferSize

        Stream.StreamEnd -> do
          -- The decompressor tells us we're done.
          -- Note that there may be input bytes still available if the stream is
          -- embedded in some other data stream, so we return any trailing data.
          inputBufferEmpty <- Stream.inputBufferEmpty
          if inputBufferEmpty
            then do finish (DecompressStreamEnd S.empty)
            else do (inFPtr, offset, length) <- Stream.popRemainingInputBuffer
                    let inchunk = mkBS inFPtr offset length
                    finish (DecompressStreamEnd inchunk)

        Stream.Error code msg -> case code of
          -- BufferError here can only mean premature EOF, per the invariant
          -- asserted above.
          Stream.BufferError -> finish (DecompressStreamError TruncatedInput)
          Stream.NeedDict adler -> do
            err <- setDictionary adler mdict
            case err of
              Just streamErr -> finish streamErr
              Nothing        -> drainBuffers lastChunk
          Stream.DataError -> finish (DecompressStreamError (DataFormatError msg))
          _ -> fail msg

    -- Note even if we end with an error we still try to flush the last chunk if
    -- there is one. The user just has to decide what they want to trust.
    finish end = do
      outputBufferBytesAvailable <- Stream.outputBufferBytesAvailable
      if outputBufferBytesAvailable > 0
        then do (outFPtr, offset, length) <- Stream.popOutputBuffer
                return (DecompressOutputAvailable (mkBS outFPtr offset length) (return end))
        else return end

    -- Supply the user's preset dictionary (if any) when zlib reports
    -- NeedDict; a wrong or missing dictionary becomes a stream error.
    setDictionary :: Stream.DictionaryHash -> Maybe S.ByteString
                  -> Stream (Maybe (DecompressStream Stream))
    setDictionary _adler Nothing =
      return $ Just (DecompressStreamError DictionaryRequired)
    setDictionary _adler (Just dict) =
      case int2cuint_safe (S.length dict) of
        Nothing -> fail "error when setting inflate dictionary, its length does not fit into CUInt"
        Just{} -> do
          status <- Stream.inflateSetDictionary dict
          case status of
            Stream.Ok -> return Nothing
            Stream.Error Stream.DataError _ ->
              return $ Just (DecompressStreamError DictionaryMismatch)
            _ -> fail "error when setting inflate dictionary"

------------------------------------------------------------------------------

-- | Allocate a fresh zlib stream state, lazy-'ST' version.
mkStateST :: ST s (Stream.State s)
-- | Allocate a fresh zlib stream state, 'IO' version.
mkStateIO :: IO (Stream.State RealWorld)
mkStateST = strictToLazyST Stream.mkState
mkStateIO = stToIO Stream.mkState

-- | Run one 'Stream' step against an explicit state, returning the result
-- together with the threaded state.
runStreamST :: Stream a -> Stream.State s -> ST s (a, Stream.State s)
runStreamIO :: Stream a -> Stream.State RealWorld -> IO (a, Stream.State RealWorld)
-- noDuplicate stops the lazy-ST thunk from being entered by two threads at
-- once, which would run the underlying IO effects twice.
runStreamST strm zstate = strictToLazyST (Unsafe.unsafeIOToST noDuplicate >> Stream.runStream strm zstate)
runStreamIO strm zstate = stToIO (Stream.runStream strm zstate)

-- | Chunk size must fit into t'CUInt'.
-- | Drive the pure 'compressStream' state machine from 'IO', allocating a
-- fresh zlib state when the first chunk of input arrives.
--
-- Chunk size must fit into t'CUInt'.
compressStreamIO :: Stream.Format -> CompressParams -> CompressStream IO
compressStreamIO format params =
    CompressInputRequired $ \chunk -> do
      st0         <- mkStateIO
      (step, st1) <- runStreamIO (compressStream format params chunk) st0
      return (advance step st1)
  where
    -- Re-wrap every pure 'Stream' transition as an 'IO' transition,
    -- threading the zlib state through each step.
    advance :: CompressStream Stream -> Stream.State RealWorld -> CompressStream IO
    advance (CompressInputRequired k) st =
      CompressInputRequired $ \chunk -> do
        (step, st') <- runStreamIO (k chunk) st
        return (advance step st')
    advance (CompressOutputAvailable chunk k) st =
      CompressOutputAvailable chunk $ do
        (step, st') <- runStreamIO k st
        return (advance step st')
    advance CompressStreamEnd _ = CompressStreamEnd

-- | Drive the pure 'compressStream' state machine from 'ST'; the mirror
-- image of 'compressStreamIO'.
--
-- Chunk size must fit into t'CUInt'.
compressStreamST :: Stream.Format -> CompressParams -> CompressStream (ST s)
compressStreamST format params =
    CompressInputRequired $ \chunk -> do
      st0         <- mkStateST
      (step, st1) <- runStreamST (compressStream format params chunk) st0
      return (advance step st1)
  where
    -- Same state-threading wrapper as in 'compressStreamIO', in 'ST'.
    advance :: CompressStream Stream -> Stream.State s -> CompressStream (ST s)
    advance (CompressInputRequired k) st =
      CompressInputRequired $ \chunk -> do
        (step, st') <- runStreamST (k chunk) st
        return (advance step st')
    advance (CompressOutputAvailable chunk k) st =
      CompressOutputAvailable chunk $ do
        (step, st') <- runStreamST k st
        return (advance step st')
    advance CompressStreamEnd _ = CompressStreamEnd

-- | Chunk size must fit into t'CUInt'.
-- | Drive the pure 'decompressStream' state machine from 'IO'.
--
-- For gzip with 'decompressAllMembers' set, when one member ends before the
-- input does, this looks ahead for the two gzip magic bytes @0x1f 0x8b@ and,
-- if found, resumes decompression of the next member ('tryFollowingStream').
-- The lookahead has to cope with the magic bytes being split across chunk
-- boundaries, hence the 0/1/many case analyses below.
--
-- The 'Bool' threaded through 'go' records whether we have already seen the
-- end-of-input (an empty chunk); after EOF we never look for another member.
--
-- Chunk size must fit into t'CUInt'.
decompressStreamIO :: Stream.Format -> DecompressParams -> DecompressStream IO
decompressStreamIO format params =
      DecompressInputRequired $ \chunk -> do
        zstate <- mkStateIO
        let next = decompressStream format params False
        (strm', zstate') <- runStreamIO (next chunk) zstate
        go strm' zstate' (S.null chunk)
  where
    go :: DecompressStream Stream -> Stream.State RealWorld -> Bool
       -> IO (DecompressStream IO)
    go (DecompressInputRequired next) zstate !_ =
      return $ DecompressInputRequired $ \chunk -> do
        (strm', zstate') <- runStreamIO (next chunk) zstate
        go strm' zstate' (S.null chunk)
    go (DecompressOutputAvailable chunk next) zstate !eof =
      return $ DecompressOutputAvailable chunk $ do
        (strm', zstate') <- runStreamIO next zstate
        go strm' zstate' eof
    go (DecompressStreamEnd unconsumed) zstate !eof
      | format == Stream.gzipFormat
      , decompressAllMembers params
      , not eof   = tryFollowingStream unconsumed zstate
      | otherwise = finaliseStreamEnd unconsumed zstate
    go (DecompressStreamError err) zstate !_ = finaliseStreamError err zstate

    -- Look for the start of another gzip member in the trailing data (or in
    -- freshly demanded input when there is not enough of it yet).
    tryFollowingStream :: S.ByteString -> Stream.State RealWorld
                       -> IO (DecompressStream IO)
    tryFollowingStream chunk zstate = case S.length chunk of
      0 -> return $ DecompressInputRequired $ \chunk' -> case S.length chunk' of
             0 -> finaliseStreamEnd S.empty zstate
             1 | S.head chunk' /= 0x1f -> finaliseStreamEnd chunk' zstate
             1 -> return $ DecompressInputRequired $ \chunk'' ->
                    case S.length chunk'' of
                      0 -> finaliseStreamEnd chunk' zstate
                      _ -> checkHeaderSplit (S.head chunk') chunk'' zstate
             _ -> checkHeader chunk' zstate
      1 -> return $ DecompressInputRequired $ \chunk' -> case S.length chunk' of
             0 -> finaliseStreamEnd chunk zstate
             _ -> checkHeaderSplit (S.head chunk) chunk' zstate
      _ -> checkHeader chunk zstate

    -- The first magic byte arrived in an earlier chunk; 'byte' is that byte
    -- and 'chunk' is non-empty and starts with the candidate second byte.
    checkHeaderSplit :: Word8 -> S.ByteString -> Stream.State RealWorld
                     -> IO (DecompressStream IO)
    checkHeaderSplit 0x1f chunk zstate
      | S.head chunk == 0x8b = do
        -- Resupply the two magic bytes we already consumed, then continue
        -- with the rest of the current chunk.
        let resume = decompressStream format params True (S.pack [0x1f, 0x8b])
        if S.length chunk > 1
          then do
            -- have to handle the remaining data in this chunk
            (x, zstate') <- runStreamIO resume zstate
            -- Explicit case instead of a partial pattern bind: a failed
            -- MonadFail pattern match in IO would throw an opaque IOError;
            -- this matches the ST version and gives a meaningful message.
            let next = case x of
                  DecompressInputRequired n -> n
                  _ -> error "checkHeaderSplit: unexpected result of runStreamIO"
            (strm', zstate'') <- runStreamIO (next (S.tail chunk)) zstate'
            go strm' zstate'' False
          else do
            -- subtle special case when the chunk tail is empty
            -- yay for QC tests
            (strm, zstate') <- runStreamIO resume zstate
            go strm zstate' False
    checkHeaderSplit byte chunk zstate =
      -- Not a gzip member start: return the lookahead bytes as trailing data.
      finaliseStreamEnd (S.cons byte chunk) zstate

    -- 'chunk' has length >= 2 here (the 0 and 1 cases are handled above),
    -- so indexing positions 0 and 1 is safe.
    checkHeader :: S.ByteString -> Stream.State RealWorld
                -> IO (DecompressStream IO)
    checkHeader chunk zstate
      | S.index chunk 0 == 0x1f
      , S.index chunk 1 == 0x8b = do
        let resume = decompressStream format params True chunk
        (strm', zstate') <- runStreamIO resume zstate
        go strm' zstate' False
    checkHeader chunk zstate = finaliseStreamEnd chunk zstate

    -- Release the zlib state before reporting the final outcome.
    finaliseStreamEnd unconsumed zstate = do
        _ <- runStreamIO Stream.finalise zstate
        return (DecompressStreamEnd unconsumed)
    finaliseStreamError err zstate = do
        _ <- runStreamIO Stream.finalise zstate
        return (DecompressStreamError err)

-- | Chunk size must fit into t'CUInt'.
decompressStreamST :: Stream.Format -> DecompressParams -> DecompressStream (ST s) decompressStreamST format params = DecompressInputRequired $ \chunk -> do zstate <- mkStateST let next = decompressStream format params False (strm', zstate') <- runStreamST (next chunk) zstate go strm' zstate' (S.null chunk) where go :: DecompressStream Stream -> Stream.State s -> Bool -> ST s (DecompressStream (ST s)) go (DecompressInputRequired next) zstate !_ = return $ DecompressInputRequired $ \chunk -> do (strm', zstate') <- runStreamST (next chunk) zstate go strm' zstate' (S.null chunk) go (DecompressOutputAvailable chunk next) zstate !eof = return $ DecompressOutputAvailable chunk $ do (strm', zstate') <- runStreamST next zstate go strm' zstate' eof go (DecompressStreamEnd unconsumed) zstate !eof | format == Stream.gzipFormat , decompressAllMembers params , not eof = tryFollowingStream unconsumed zstate | otherwise = finaliseStreamEnd unconsumed zstate go (DecompressStreamError err) zstate !_ = finaliseStreamError err zstate tryFollowingStream :: S.ByteString -> Stream.State s -> ST s (DecompressStream (ST s)) tryFollowingStream chunk zstate = case S.length chunk of 0 -> return $ DecompressInputRequired $ \chunk' -> case S.length chunk' of 0 -> finaliseStreamEnd S.empty zstate 1 | S.head chunk' /= 0x1f -> finaliseStreamEnd chunk' zstate 1 -> return $ DecompressInputRequired $ \chunk'' -> case S.length chunk'' of 0 -> finaliseStreamEnd chunk' zstate _ -> checkHeaderSplit (S.head chunk') chunk'' zstate _ -> checkHeader chunk' zstate 1 -> return $ DecompressInputRequired $ \chunk' -> case S.length chunk' of 0 -> finaliseStreamEnd chunk zstate _ -> checkHeaderSplit (S.head chunk) chunk' zstate _ -> checkHeader chunk zstate checkHeaderSplit :: Word8 -> S.ByteString -> Stream.State s -> ST s (DecompressStream (ST s)) checkHeaderSplit 0x1f chunk zstate | S.head chunk == 0x8b = do let resume = decompressStream format params True (S.pack [0x1f, 0x8b]) if S.length chunk > 1 then do -- 
have to handle the remaining data in this chunk (x, zstate') <- runStreamST resume zstate let next = case x of DecompressInputRequired n -> n _ -> error "checkHeaderSplit: unexpected result of runStreamST" (strm', zstate'') <- runStreamST (next (S.tail chunk)) zstate' go strm' zstate'' False else do -- subtle special case when the chunk tail is empty -- yay for QC tests (strm, zstate') <- runStreamST resume zstate go strm zstate' False checkHeaderSplit byte chunk zstate = finaliseStreamEnd (S.cons byte chunk) zstate checkHeader :: S.ByteString -> Stream.State s -> ST s (DecompressStream (ST s)) checkHeader chunk zstate | S.index chunk 0 == 0x1f , S.index chunk 1 == 0x8b = do let resume = decompressStream format params True chunk (strm', zstate') <- runStreamST resume zstate go strm' zstate' False checkHeader chunk zstate = finaliseStreamEnd chunk zstate finaliseStreamEnd unconsumed zstate = do _ <- runStreamST Stream.finalise zstate return (DecompressStreamEnd unconsumed) finaliseStreamError err zstate = do _ <- runStreamST Stream.finalise zstate return (DecompressStreamError err) -- | This one should not fail on 64-bit arch. cuint2int :: CUInt -> Int cuint2int n = fromMaybe (error $ "cuint2int: cannot cast " ++ show n) $ toIntegralSized n -- | This one could and will fail if chunks of ByteString are longer than 4G. int2cuint :: Int -> CUInt int2cuint n = fromMaybe (error $ "int2cuint: cannot cast " ++ show n) $ toIntegralSized n int2cuint_capped :: Int -> CUInt int2cuint_capped = fromMaybe maxBound . toIntegralSized . 
max 0 int2cuint_safe :: Int -> Maybe CUInt int2cuint_safe = toIntegralSized toLimitedChunks :: L.ByteString -> [S.ByteString] toLimitedChunks L.Empty = [] toLimitedChunks (L.Chunk x xs) = case int2cuint_safe (S.length x) of Nothing -> let (y, z) = S.splitAt (cuint2int (maxBound :: CUInt)) x in y : toLimitedChunks (L.Chunk z xs) Just{} -> x : toLimitedChunks xs zlib-0.7.1.1/Codec/Compression/Zlib/Raw.hs0000644000000000000000000000431007346545000016310 0ustar0000000000000000----------------------------------------------------------------------------- -- | -- Copyright : (c) 2006-2014 Duncan Coutts -- License : BSD-style -- -- Maintainer : duncan@community.haskell.org -- -- Compression and decompression of data streams in the raw deflate format. -- -- The format is described in detail in RFC #1951: -- -- -- See also the zlib home page: -- ----------------------------------------------------------------------------- module Codec.Compression.Zlib.Raw ( -- * Simple compression and decompression compress, decompress, DecompressError(..), -- * Extended API with control over compression parameters compressWith, decompressWith, CompressParams(..), defaultCompressParams, DecompressParams(..), defaultDecompressParams, -- ** The compression parameter types CompressionLevel(..), defaultCompression, noCompression, bestSpeed, bestCompression, compressionLevel, Method, deflateMethod, WindowBits(..), defaultWindowBits, windowBits, MemoryLevel(..), defaultMemoryLevel, minMemoryLevel, maxMemoryLevel, memoryLevel, CompressionStrategy, defaultStrategy, filteredStrategy, huffmanOnlyStrategy, rleStrategy, fixedStrategy, ) where import Data.ByteString.Lazy (ByteString) import qualified Codec.Compression.Zlib.Internal as Internal import Codec.Compression.Zlib.Internal hiding (compress, decompress) -- | Decompress a stream of data in the raw deflate format. 
decompress :: ByteString -> ByteString decompress = decompressWith defaultDecompressParams -- | Like 'Codec.Compression.Zlib.Raw.decompress' but with the ability to specify various decompression -- parameters. decompressWith :: DecompressParams -> ByteString -> ByteString decompressWith = Internal.decompress rawFormat -- | Compress a stream of data into the raw deflate format. compress :: ByteString -> ByteString compress = compressWith defaultCompressParams -- | Like 'Codec.Compression.Zlib.Raw.compress' but with the ability to specify various decompression -- parameters. compressWith :: CompressParams -> ByteString -> ByteString compressWith = Internal.compress rawFormat zlib-0.7.1.1/Codec/Compression/Zlib/Stream.hsc0000644000000000000000000007601007346545000017163 0ustar0000000000000000{-# LANGUAGE CPP, ForeignFunctionInterface #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE CApiFFI #-} ----------------------------------------------------------------------------- -- | -- Copyright : (c) 2006-2015 Duncan Coutts -- License : BSD-style -- -- Maintainer : duncan@community.haskell.org -- -- Zlib wrapper layer -- ----------------------------------------------------------------------------- module Codec.Compression.Zlib.Stream ( -- * The Zlib state monad Stream, State, mkState, runStream, unsafeLiftIO, finalise, -- * Initialisation deflateInit, inflateInit, -- ** Initialisation parameters Format, gzipFormat, zlibFormat, rawFormat, gzipOrZlibFormat, formatSupportsDictionary, CompressionLevel(..), defaultCompression, noCompression, bestSpeed, bestCompression, compressionLevel, Method, deflateMethod, WindowBits(..), defaultWindowBits, windowBits, MemoryLevel(..), defaultMemoryLevel, minMemoryLevel, maxMemoryLevel, memoryLevel, CompressionStrategy, defaultStrategy, filteredStrategy, huffmanOnlyStrategy, rleStrategy, fixedStrategy, -- * The business deflate, inflate, Status(..), Flush(..), ErrorCode(..), -- ** Special operations inflateReset, -- * Buffer management -- ** 
Input buffer pushInputBuffer, inputBufferEmpty, popRemainingInputBuffer, -- ** Output buffer pushOutputBuffer, popOutputBuffer, outputBufferBytesAvailable, outputBufferSpaceRemaining, outputBufferFull, -- ** Dictionary deflateSetDictionary, inflateSetDictionary, -- ** Dictionary hashes DictionaryHash, dictionaryHash, zeroDictionaryHash, #ifdef DEBUG -- * Debugging consistencyCheck, dump, trace, #endif ) where import Foreign ( Word8, Ptr, nullPtr, plusPtr, castPtr, peekByteOff, pokeByteOff , ForeignPtr, FinalizerPtr, mallocForeignPtrBytes, addForeignPtrFinalizer , withForeignPtr, touchForeignPtr, minusPtr ) import Foreign.ForeignPtr.Unsafe ( unsafeForeignPtrToPtr ) import System.IO.Unsafe ( unsafePerformIO ) import Foreign ( finalizeForeignPtr ) import Foreign.C #if MIN_VERSION_base(4,18,0) import Foreign.C.ConstPtr #endif import Data.ByteString.Internal (nullForeignPtr) import qualified Data.ByteString.Unsafe as B import Data.ByteString (ByteString) import Control.Applicative (Applicative(..)) import Control.Monad (ap,liftM) import qualified Control.Monad.Fail as Fail import Control.Monad.ST.Strict import Control.Monad.ST.Unsafe import Control.Exception (assert) import Data.Bits (toIntegralSized) import Data.Coerce (coerce) import Data.Maybe (fromMaybe) import GHC.Generics (Generic) #ifdef DEBUG import System.IO (hPutStrLn, stderr) #endif import Prelude hiding (length, Applicative(..)) #include "zlib.h" pushInputBuffer :: ForeignPtr Word8 -> Int -> CUInt -> Stream () pushInputBuffer inBuf' offset length = do -- must not push a new input buffer if the last one is not used up inAvail <- getInAvail assert (inAvail == 0) $ return () -- Now that we're setting a new input buffer, we can be sure that zlib no -- longer has a reference to the old one. Therefore this is the last point -- at which the old buffer had to be retained. It's safe to release now. 
inBuf <- getInBuf unsafeLiftIO $ touchForeignPtr inBuf -- now set the available input buffer ptr and length setInBuf inBuf' setInAvail length setInNext (unsafeForeignPtrToPtr inBuf' `plusPtr` offset) -- Note the 'unsafe'. We are passing the raw ptr inside inBuf' to zlib. -- To make this safe we need to hold on to the ForeignPtr for at least as -- long as zlib is using the underlying raw ptr. inputBufferEmpty :: Stream Bool inputBufferEmpty = getInAvail >>= return . (==0) popRemainingInputBuffer :: Stream (ForeignPtr Word8, Int, Int) popRemainingInputBuffer = do inBuf <- getInBuf inNext <- getInNext inAvail <- getInAvail -- there really should be something to pop, otherwise it's silly assert (inAvail > 0) $ return () setInAvail 0 return (inBuf, inNext `minusPtr` unsafeForeignPtrToPtr inBuf, inAvail) pushOutputBuffer :: ForeignPtr Word8 -> Int -> CUInt -> Stream () pushOutputBuffer outBuf' offset length = do --must not push a new buffer if there is still data in the old one outAvail <- getOutAvail assert (outAvail == 0) $ return () -- Note that there may still be free space in the output buffer, that's ok, -- you might not want to bother completely filling the output buffer say if -- there's only a few free bytes left. outBuf <- getOutBuf unsafeLiftIO $ touchForeignPtr outBuf -- now set the available input buffer ptr and length setOutBuf outBuf' setOutFree length setOutNext (unsafeForeignPtrToPtr outBuf' `plusPtr` offset) setOutOffset offset setOutAvail 0 -- get that part of the output buffer that is currently full -- (might be 0, use outputBufferBytesAvailable to check) -- this may leave some space remaining in the buffer, use -- outputBufferSpaceRemaining to check. 
-- | Take the currently filled part of the output buffer: the buffer, the
-- offset of the filled region and its length.  The caller must have checked
-- beforehand that there is something to take.
popOutputBuffer :: Stream (ForeignPtr Word8, Int, Int)
popOutputBuffer = do

    outBuf    <- getOutBuf
    outOffset <- getOutOffset
    outAvail  <- getOutAvail

    -- there really should be something to pop, otherwise it's silly
    assert (outAvail > 0) $ return ()

    -- Advance the offset past the region being handed out so the next pop
    -- starts where this one ended.
    setOutOffset (outOffset + outAvail)
    setOutAvail  0

    return (outBuf, outOffset, outAvail)

-- this is the number of bytes available in the output buffer
outputBufferBytesAvailable :: Stream Int
outputBufferBytesAvailable = getOutAvail

-- you needn't get all the output immediately, you can continue until
-- there is no more output space available, this tells you that amount
outputBufferSpaceRemaining :: Stream Int
outputBufferSpaceRemaining = getOutFree

-- you only need to supply a new buffer when there is no more output buffer
-- space remaining
outputBufferFull :: Stream Bool
outputBufferFull = liftM (==0) outputBufferSpaceRemaining

-- you can only run this when the output buffer is not empty
-- you can run it when the input buffer is empty but it doesn't do anything
-- after running deflate either the output buffer will be full
-- or the input buffer will be empty (or both)
deflate :: Flush -> Stream Status
deflate flush = do

    outFree <- getOutFree

    -- deflate needs free space in the output buffer
    assert (outFree > 0) $ return ()

    result <- deflate_ flush
    outFree' <- getOutFree

    -- number of bytes of extra output there is available as a result of
    -- the call to deflate:
    let outExtra = outFree - outFree'

    outAvail <- getOutAvail
    setOutAvail (outAvail + outExtra)
    return result

-- | Run one inflate step; same free-space precondition and output-available
-- bookkeeping as 'deflate'.
inflate :: Flush -> Stream Status
inflate flush = do

    outFree <- getOutFree

    -- inflate needs free space in the output buffer
    assert (outFree > 0) $ return ()

    result <- inflate_ flush
    outFree' <- getOutFree

    -- number of bytes of extra output there is available as a result of
    -- the call to inflate:
    let outExtra = outFree - outFree'

    outAvail <- getOutAvail
    setOutAvail (outAvail + outExtra)
    return result

-- | Reset the inflate state so the same stream can decode a following
-- (e.g. gzip multi-member) compressed stream.
inflateReset :: Stream ()
inflateReset = do

    outAvail <- getOutAvail
    inAvail  <- getInAvail
    -- At the point where this is used, all the output should have been consumed
    -- and any trailing input should be extracted and resupplied explicitly, not
    -- just left.
    assert (outAvail == 0 && inAvail == 0) $ return ()

    err <- withStreamState $ \zstream ->
      c_inflateReset zstream
    failIfError err

-- | Dictionary length must fit into t'CUInt'.
deflateSetDictionary :: ByteString -> Stream Status
deflateSetDictionary dict = do
  err <- withStreamState $ \zstream ->
           B.unsafeUseAsCStringLen dict $ \(ptr, len) ->
             c_deflateSetDictionary zstream (castPtr ptr) (int2cuint len)
  toStatus err

-- | Dictionary length must fit into t'CUInt'.
inflateSetDictionary :: ByteString -> Stream Status
inflateSetDictionary dict = do
  err <- withStreamState $ \zstream -> do
           B.unsafeUseAsCStringLen dict $ \(ptr, len) ->
             c_inflateSetDictionary zstream (castPtr ptr) (int2cuint len)
  toStatus err

-- | A hash of a custom compression dictionary. These hashes are used by
-- zlib as dictionary identifiers.
-- (The particular hash function used is Adler32.)
--
newtype DictionaryHash = DictHash CULong
  deriving (Eq, Ord, Read, Show)

-- | Update a running 'DictionaryHash'. You can generate a 'DictionaryHash'
-- from one or more 'ByteString's by starting from 'zeroDictionaryHash', e.g.
--
-- > dictionaryHash zeroDictionaryHash :: ByteString -> DictionaryHash
--
-- or
--
-- > foldl' dictionaryHash zeroDictionaryHash :: [ByteString] -> DictionaryHash
--
-- Dictionary length must fit into t'CUInt'.
dictionaryHash :: DictionaryHash -> ByteString -> DictionaryHash
dictionaryHash (DictHash adler) dict =
  -- unsafePerformIO is safe here: c_adler32 is a pure function of its
  -- arguments with no observable side effects.
  unsafePerformIO $
    B.unsafeUseAsCStringLen dict $ \(ptr, len) ->
      liftM DictHash $ c_adler32 adler (castPtr ptr) (int2cuint len)

-- | A zero 'DictionaryHash' to use as the initial value with 'dictionaryHash'.
-- zeroDictionaryHash :: DictionaryHash zeroDictionaryHash = DictHash 0 ---------------------------- -- Stream monad -- newtype Stream a = Z { unZ :: ForeignPtr StreamState -> ForeignPtr Word8 -> ForeignPtr Word8 -> Int -> Int -> IO (ForeignPtr Word8 ,ForeignPtr Word8 ,Int, Int, a) } instance Functor Stream where fmap = liftM instance Applicative Stream where pure = returnZ (<*>) = ap (*>) = thenZ_ instance Monad Stream where (>>=) = thenZ -- m >>= f = (m `thenZ` \a -> consistencyCheck `thenZ_` returnZ a) `thenZ` f (>>) = (*>) #if !MIN_VERSION_base(4,13,0) fail = Fail.fail #endif instance Fail.MonadFail Stream where fail = (finalise >>) . failZ returnZ :: a -> Stream a returnZ a = Z $ \_ inBuf outBuf outOffset outLength -> return (inBuf, outBuf, outOffset, outLength, a) {-# INLINE returnZ #-} thenZ :: Stream a -> (a -> Stream b) -> Stream b thenZ (Z m) f = Z $ \stream inBuf outBuf outOffset outLength -> m stream inBuf outBuf outOffset outLength >>= \(inBuf', outBuf', outOffset', outLength', a) -> unZ (f a) stream inBuf' outBuf' outOffset' outLength' {-# INLINE thenZ #-} thenZ_ :: Stream a -> Stream b -> Stream b thenZ_ (Z m) f = Z $ \stream inBuf outBuf outOffset outLength -> m stream inBuf outBuf outOffset outLength >>= \(inBuf', outBuf', outOffset', outLength', _) -> unZ f stream inBuf' outBuf' outOffset' outLength' {-# INLINE thenZ_ #-} failZ :: String -> Stream a failZ msg = Z (\_ _ _ _ _ -> fail ("Codec.Compression.Zlib: " ++ msg)) data State s = State !(ForeignPtr StreamState) !(ForeignPtr Word8) !(ForeignPtr Word8) {-# UNPACK #-} !Int {-# UNPACK #-} !Int mkState :: ST s (State s) mkState = unsafeIOToST $ do stream <- mallocForeignPtrBytes (#{const sizeof(z_stream)}) withForeignPtr stream $ \ptr -> do #{poke z_stream, msg} ptr nullPtr #{poke z_stream, zalloc} ptr nullPtr #{poke z_stream, zfree} ptr nullPtr #{poke z_stream, opaque} ptr nullPtr #{poke z_stream, next_in} ptr nullPtr #{poke z_stream, next_out} ptr nullPtr #{poke z_stream, avail_in} ptr (0 :: 
CUInt) #{poke z_stream, avail_out} ptr (0 :: CUInt) return (State stream nullForeignPtr nullForeignPtr 0 0) runStream :: Stream a -> State s -> ST s (a, State s) runStream (Z m) (State stream inBuf outBuf outOffset outLength) = unsafeIOToST $ m stream inBuf outBuf outOffset outLength >>= \(inBuf', outBuf', outOffset', outLength', a) -> return (a, State stream inBuf' outBuf' outOffset' outLength') -- This is marked as unsafe because runStream uses unsafeIOToST so anything -- lifted here can end up being unsafePerformIO'd. unsafeLiftIO :: IO a -> Stream a unsafeLiftIO m = Z $ \_stream inBuf outBuf outOffset outLength -> do a <- m return (inBuf, outBuf, outOffset, outLength, a) getStreamState :: Stream (ForeignPtr StreamState) getStreamState = Z $ \stream inBuf outBuf outOffset outLength -> do return (inBuf, outBuf, outOffset, outLength, stream) getInBuf :: Stream (ForeignPtr Word8) getInBuf = Z $ \_stream inBuf outBuf outOffset outLength -> do return (inBuf, outBuf, outOffset, outLength, inBuf) getOutBuf :: Stream (ForeignPtr Word8) getOutBuf = Z $ \_stream inBuf outBuf outOffset outLength -> do return (inBuf, outBuf, outOffset, outLength, outBuf) getOutOffset :: Stream Int getOutOffset = Z $ \_stream inBuf outBuf outOffset outLength -> do return (inBuf, outBuf, outOffset, outLength, outOffset) getOutAvail :: Stream Int getOutAvail = Z $ \_stream inBuf outBuf outOffset outLength -> do return (inBuf, outBuf, outOffset, outLength, outLength) setInBuf :: ForeignPtr Word8 -> Stream () setInBuf inBuf = Z $ \_stream _ outBuf outOffset outLength -> do return (inBuf, outBuf, outOffset, outLength, ()) setOutBuf :: ForeignPtr Word8 -> Stream () setOutBuf outBuf = Z $ \_stream inBuf _ outOffset outLength -> do return (inBuf, outBuf, outOffset, outLength, ()) setOutOffset :: Int -> Stream () setOutOffset outOffset = Z $ \_stream inBuf outBuf _ outLength -> do return (inBuf, outBuf, outOffset, outLength, ()) setOutAvail :: Int -> Stream () setOutAvail outLength = Z $ \_stream 
inBuf outBuf outOffset _ -> do return (inBuf, outBuf, outOffset, outLength, ()) ---------------------------- -- Debug stuff -- #ifdef DEBUG trace :: String -> Stream () trace = unsafeLiftIO . hPutStrLn stderr dump :: Stream () dump = do inNext <- getInNext inAvail <- getInAvail outNext <- getOutNext outFree <- getOutFree outAvail <- getOutAvail outOffset <- getOutOffset unsafeLiftIO $ hPutStrLn stderr $ "Stream {\n" ++ " inNext = " ++ show inNext ++ ",\n" ++ " inAvail = " ++ show inAvail ++ ",\n" ++ "\n" ++ " outNext = " ++ show outNext ++ ",\n" ++ " outFree = " ++ show outFree ++ ",\n" ++ " outAvail = " ++ show outAvail ++ ",\n" ++ " outOffset = " ++ show outOffset ++ "\n" ++ "}" consistencyCheck consistencyCheck :: Stream () consistencyCheck = do outBuf <- getOutBuf outOffset <- getOutOffset outAvail <- getOutAvail outNext <- getOutNext let outBufPtr = unsafeForeignPtrToPtr outBuf assert (outBufPtr `plusPtr` (outOffset + outAvail) == outNext) $ return () #endif ---------------------------- -- zlib wrapper layer -- data Status = Ok | StreamEnd | Error ErrorCode String data ErrorCode = NeedDict DictionaryHash | FileError | StreamError | DataError | MemoryError | BufferError -- ^ No progress was possible or there was not enough room in -- the output buffer when 'Finish' is used. Note that -- 'BufferError' is not fatal, and 'inflate' can be called -- again with more input and more output space to continue. 
| VersionError | Unexpected toStatus :: CInt -> Stream Status toStatus errno = case errno of (#{const Z_OK}) -> return Ok (#{const Z_STREAM_END}) -> return StreamEnd (#{const Z_NEED_DICT}) -> do adler <- withStreamPtr (#{peek z_stream, adler}) err (NeedDict (DictHash adler)) "custom dictionary needed" (#{const Z_BUF_ERROR}) -> err BufferError "buffer error" (#{const Z_ERRNO}) -> err FileError "file error" (#{const Z_STREAM_ERROR}) -> err StreamError "stream error" (#{const Z_DATA_ERROR}) -> err DataError "data error" (#{const Z_MEM_ERROR}) -> err MemoryError "insufficient memory" (#{const Z_VERSION_ERROR}) -> err VersionError "incompatible zlib version" other -> return $ Error Unexpected ("unexpected zlib status: " ++ show other) where err errCode altMsg = liftM (Error errCode) $ do msgPtr <- withStreamPtr (#{peek z_stream, msg}) if msgPtr /= nullPtr then unsafeLiftIO (peekCAString msgPtr) else return altMsg failIfError :: CInt -> Stream () failIfError errno = toStatus errno >>= \status -> case status of (Error _ msg) -> fail msg _ -> return () data Flush = NoFlush | SyncFlush | FullFlush | Finish | Block fromFlush :: Flush -> CInt fromFlush NoFlush = #{const Z_NO_FLUSH} fromFlush SyncFlush = #{const Z_SYNC_FLUSH} fromFlush FullFlush = #{const Z_FULL_FLUSH} fromFlush Finish = #{const Z_FINISH} fromFlush Block = #{const Z_BLOCK} -- | The format used for compression or decompression. There are three -- variations. -- data Format = GZip | Zlib | Raw | GZipOrZlib deriving (Eq, Ord, Enum, Bounded, Show , Generic ) -- | The gzip format uses a header with a checksum and some optional meta-data -- about the compressed file. It is intended primarily for compressing -- individual files but is also sometimes used for network protocols such as -- HTTP. The format is described in detail in RFC #1952 -- -- gzipFormat :: Format gzipFormat = GZip -- | The zlib format uses a minimal header with a checksum but no other -- meta-data. 
It is especially designed for use in network protocols. The -- format is described in detail in RFC #1950 -- -- zlibFormat :: Format zlibFormat = Zlib -- | The \'raw\' format is just the compressed data stream without any -- additional header, meta-data or data-integrity checksum. The format is -- described in detail in RFC #1951 -- rawFormat :: Format rawFormat = Raw -- | This is not a format as such. It enabled zlib or gzip decoding with -- automatic header detection. This only makes sense for decompression. -- gzipOrZlibFormat :: Format gzipOrZlibFormat = GZipOrZlib formatSupportsDictionary :: Format -> Bool formatSupportsDictionary Zlib = True formatSupportsDictionary Raw = True formatSupportsDictionary _ = False -- | The compression method -- data Method = Deflated deriving (Eq, Ord, Enum, Bounded, Show , Generic ) -- | The only method supported in this version of zlib. -- Indeed it is likely to be the only method that ever will be supported. -- deflateMethod :: Method deflateMethod = Deflated fromMethod :: Method -> CInt fromMethod Deflated = #{const Z_DEFLATED} -- | The compression level parameter controls the amount of compression. This -- is a trade-off between the amount of compression and the time required to do -- the compression. -- newtype CompressionLevel = CompressionLevel Int deriving ( Eq , Ord -- ^ @since 0.7.0.0 , Show , Generic ) -- | The default t'CompressionLevel'. defaultCompression :: CompressionLevel defaultCompression = CompressionLevel 6 -- Ideally we should use #{const Z_DEFAULT_COMPRESSION} = -1, whose meaning -- depends on zlib version and, strictly speaking, is not guaranteed to be 6. -- It would however interact badly with Eq / Ord instances. -- | No compression, just a block copy. noCompression :: CompressionLevel noCompression = CompressionLevel #{const Z_NO_COMPRESSION} -- | The fastest compression method (less compression). 
bestSpeed :: CompressionLevel
bestSpeed = CompressionLevel #{const Z_BEST_SPEED}

-- | The slowest compression method (best compression).
bestCompression :: CompressionLevel
bestCompression = CompressionLevel #{const Z_BEST_COMPRESSION}

-- | A specific compression level in the range @0..9@.
-- Throws an error for arguments outside of this range.
--
-- * 0 stands for 'noCompression',
-- * 1 stands for 'bestSpeed',
-- * 6 stands for 'defaultCompression',
-- * 9 stands for 'bestCompression'.
--
compressionLevel :: Int -> CompressionLevel
compressionLevel n
  | n >= 0 && n <= 9 = CompressionLevel n
  | otherwise = error "CompressionLevel must be in the range 0..9"

-- Internal: narrow a validated 'CompressionLevel' to the 'CInt' expected by
-- the zlib C API. Throws an error for out-of-range values.
fromCompressionLevel :: CompressionLevel -> CInt
fromCompressionLevel (CompressionLevel n)
  | n >= 0 && n <= 9 = int2cint n
  -- Error message fixed: it previously said "CompressLevel", inconsistent
  -- with the message in 'compressionLevel'.
  | otherwise = error "CompressionLevel must be in the range 0..9"

-- | This specifies the size of the compression window. Larger values of this
-- parameter result in better compression at the expense of higher memory
-- usage.
--
-- The compression window size is the value of the window bits raised to
-- the power 2. The window bits must be in the range @9..15@ which corresponds
-- to compression window sizes of 512b to 32Kb. The default is 15 which is also
-- the maximum size.
--
-- The total amount of memory used depends on the window bits and the
-- t'MemoryLevel'. See the t'MemoryLevel' for the details.
--
newtype WindowBits = WindowBits Int
  deriving
  ( Eq
  , Ord
  , Show
  , Generic
  )

-- zlib manual (https://www.zlib.net/manual.html#Advanced) says that WindowBits
-- could be in the range 8..15, but for some reason we require 9..15.
-- Could it be that older versions of zlib had a tighter limit?..

-- | The default t'WindowBits'. Equivalent to @'windowBits' 15@,
-- which is also the maximum size.
--
defaultWindowBits :: WindowBits
defaultWindowBits = WindowBits 15

-- | A specific compression window size, specified in bits in the range @9..15@.
-- Throws an error for arguments outside of this range. -- windowBits :: Int -> WindowBits windowBits n | n >= 9 && n <= 15 = WindowBits n | otherwise = error "WindowBits must be in the range 9..15" fromWindowBits :: Format -> WindowBits -> CInt fromWindowBits format bits = (formatModifier format) (checkWindowBits bits) where checkWindowBits (WindowBits n) | n >= 9 && n <= 15 = int2cint n | otherwise = error "WindowBits must be in the range 9..15" formatModifier Zlib = id formatModifier GZip = (+16) formatModifier GZipOrZlib = (+32) formatModifier Raw = negate -- | The t'MemoryLevel' parameter specifies how much memory should be allocated -- for the internal compression state. It is a trade-off between memory usage, -- compression ratio and compression speed. Using more memory allows faster -- compression and a better compression ratio. -- -- The total amount of memory used for compression depends on the t'WindowBits' -- and the t'MemoryLevel'. For decompression it depends only on the -- t'WindowBits'. The totals are given by the functions: -- -- > compressTotal windowBits memLevel = 4 * 2^windowBits + 512 * 2^memLevel -- > decompressTotal windowBits = 2^windowBits -- -- For example, for compression with the default @windowBits = 15@ and -- @memLevel = 8@ uses @256Kb@. So for example a network server with 100 -- concurrent compressed streams would use @25Mb@. The memory per stream can be -- halved (at the cost of somewhat degraded and slower compression) by -- reducing the @windowBits@ and @memLevel@ by one. -- -- Decompression takes less memory, the default @windowBits = 15@ corresponds -- to just @32Kb@. -- newtype MemoryLevel = MemoryLevel Int deriving ( Eq , Ord -- ^ @since 0.7.0.0 , Show , Generic ) -- | The default t'MemoryLevel'. Equivalent to @'memoryLevel' 8@. -- defaultMemoryLevel :: MemoryLevel defaultMemoryLevel = MemoryLevel 8 -- | Use minimum memory. This is slow and reduces the compression ratio. -- Equivalent to @'memoryLevel' 1@. 
--
minMemoryLevel :: MemoryLevel
minMemoryLevel = MemoryLevel 1

-- | Use maximum memory for optimal compression speed.
-- Equivalent to @'memoryLevel' 9@.
--
maxMemoryLevel :: MemoryLevel
maxMemoryLevel = MemoryLevel 9

-- | A specific memory level in the range @1..9@.
-- Throws an error for arguments outside of this range.
--
memoryLevel :: Int -> MemoryLevel
memoryLevel n =
  if n >= 1 && n <= 9
    then MemoryLevel n
    else error "MemoryLevel must be in the range 1..9"

-- Narrow a validated 'MemoryLevel' to the 'CInt' the zlib C API expects.
-- Throws an error for out-of-range values.
fromMemoryLevel :: MemoryLevel -> CInt
fromMemoryLevel (MemoryLevel n) =
  if n >= 1 && n <= 9
    then int2cint n
    else error "MemoryLevel must be in the range 1..9"

-- | The strategy parameter is used to tune the compression algorithm.
--
-- The strategy parameter only affects the compression ratio but not the
-- correctness of the compressed output even if it is not set appropriately.
--
data CompressionStrategy
  = DefaultStrategy
  | Filtered
  | HuffmanOnly
  | RLE -- ^ @since 0.7.0.0
  | Fixed -- ^ @since 0.7.0.0
  deriving (Eq, Ord, Enum, Bounded, Show, Generic)

-- | Use this default compression strategy for normal data.
--
defaultStrategy :: CompressionStrategy
defaultStrategy = DefaultStrategy

-- | Use the filtered compression strategy for data produced by a filter (or
-- predictor). Filtered data consists mostly of small values with a somewhat
-- random distribution. In this case, the compression algorithm is tuned to
-- compress them better. The effect of this strategy is to force more Huffman
-- coding and less string matching; it is somewhat intermediate between
-- 'defaultStrategy' and 'huffmanOnlyStrategy'.
--
filteredStrategy :: CompressionStrategy
filteredStrategy = Filtered

-- | Use the Huffman-only compression strategy to force Huffman encoding only
-- (no string match).
--
huffmanOnlyStrategy :: CompressionStrategy
huffmanOnlyStrategy = HuffmanOnly

-- | Use 'rleStrategy' to limit match distances to one (run-length
-- encoding).
'rleStrategy' is designed to be almost as fast as -- 'huffmanOnlyStrategy', but give better compression for PNG -- image data. -- -- @since 0.7.0.0 rleStrategy :: CompressionStrategy rleStrategy = RLE -- | 'fixedStrategy' prevents the use of dynamic Huffman codes, -- allowing for a simpler decoder for special applications. -- -- @since 0.7.0.0 fixedStrategy :: CompressionStrategy fixedStrategy = Fixed fromCompressionStrategy :: CompressionStrategy -> CInt fromCompressionStrategy DefaultStrategy = #{const Z_DEFAULT_STRATEGY} fromCompressionStrategy Filtered = #{const Z_FILTERED} fromCompressionStrategy HuffmanOnly = #{const Z_HUFFMAN_ONLY} fromCompressionStrategy RLE = #{const Z_RLE} fromCompressionStrategy Fixed = #{const Z_FIXED} withStreamPtr :: (Ptr StreamState -> IO a) -> Stream a withStreamPtr f = do stream <- getStreamState unsafeLiftIO (withForeignPtr stream f) withStreamState :: (StreamState -> IO a) -> Stream a withStreamState f = do stream <- getStreamState unsafeLiftIO (withForeignPtr stream (f . 
StreamState)) setInAvail :: CUInt -> Stream () setInAvail val = withStreamPtr $ \ptr -> #{poke z_stream, avail_in} ptr val getInAvail :: Stream Int getInAvail = liftM cuint2int $ withStreamPtr (#{peek z_stream, avail_in}) setInNext :: Ptr Word8 -> Stream () setInNext val = withStreamPtr (\ptr -> #{poke z_stream, next_in} ptr val) getInNext :: Stream (Ptr Word8) getInNext = withStreamPtr (#{peek z_stream, next_in}) setOutFree :: CUInt -> Stream () setOutFree val = withStreamPtr $ \ptr -> #{poke z_stream, avail_out} ptr val getOutFree :: Stream Int getOutFree = liftM cuint2int $ withStreamPtr (#{peek z_stream, avail_out}) setOutNext :: Ptr Word8 -> Stream () setOutNext val = withStreamPtr (\ptr -> #{poke z_stream, next_out} ptr val) #ifdef DEBUG getOutNext :: Stream (Ptr Word8) getOutNext = withStreamPtr (#{peek z_stream, next_out}) #endif inflateInit :: Format -> WindowBits -> Stream () inflateInit format bits = do checkFormatSupported format err <- withStreamState $ \zstream -> c_inflateInit2 zstream (fromWindowBits format bits) failIfError err getStreamState >>= unsafeLiftIO . addForeignPtrFinalizer c_inflateEnd deflateInit :: Format -> CompressionLevel -> Method -> WindowBits -> MemoryLevel -> CompressionStrategy -> Stream () deflateInit format compLevel method bits memLevel strategy = do checkFormatSupported format err <- withStreamState $ \zstream -> c_deflateInit2 zstream (fromCompressionLevel compLevel) (fromMethod method) (fromWindowBits format bits) (fromMemoryLevel memLevel) (fromCompressionStrategy strategy) failIfError err getStreamState >>= unsafeLiftIO . 
addForeignPtrFinalizer c_deflateEnd inflate_ :: Flush -> Stream Status inflate_ flush = do err <- withStreamState $ \zstream -> c_inflate zstream (fromFlush flush) toStatus err deflate_ :: Flush -> Stream Status deflate_ flush = do err <- withStreamState $ \zstream -> c_deflate zstream (fromFlush flush) toStatus err -- | This never needs to be used as the stream's resources will be released -- automatically when no longer needed, however this can be used to release -- them early. Only use this when you can guarantee that the stream will no -- longer be needed, for example if an error occurs or if the stream ends. -- finalise :: Stream () --TODO: finalizeForeignPtr is ghc-only finalise = getStreamState >>= unsafeLiftIO . finalizeForeignPtr checkFormatSupported :: Format -> Stream () checkFormatSupported format = do version <- unsafeLiftIO (coerce peekCAString =<< c_zlibVersion) case version of ('1':'.':'1':'.':_) | format == GZip || format == GZipOrZlib -> fail $ "version 1.1.x of the zlib C library does not support the" ++ " 'gzip' format via the in-memory api, only the 'raw' and " ++ " 'zlib' formats." _ -> return () -- | This one should not fail on 64-bit arch. cuint2int :: CUInt -> Int cuint2int n = fromMaybe (error $ "cuint2int: cannot cast " ++ show n) $ toIntegralSized n -- | This one could and will fail if chunks of ByteString are longer than 4G. int2cuint :: Int -> CUInt int2cuint n = fromMaybe (error $ "int2cuint: cannot cast " ++ show n) $ toIntegralSized n -- | This one could fail in theory, but is used only on arguments 0..9 or 0..15. 
int2cint :: Int -> CInt int2cint n = fromMaybe (error $ "int2cint: cannot cast " ++ show n) $ toIntegralSized n ---------------------- -- The foreign imports newtype StreamState = StreamState (Ptr StreamState) ##ifdef NON_BLOCKING_FFI ##define SAFTY safe ##else ##define SAFTY unsafe ##endif foreign import capi unsafe "zlib.h inflateInit2" c_inflateInit2 :: StreamState -> CInt -> IO CInt foreign import capi unsafe "zlib.h deflateInit2" c_deflateInit2 :: StreamState -> CInt -> CInt -> CInt -> CInt -> CInt -> IO CInt foreign import capi SAFTY "zlib.h inflate" c_inflate :: StreamState -> CInt -> IO CInt foreign import capi unsafe "hs-zlib.h &_hs_zlib_inflateEnd" c_inflateEnd :: FinalizerPtr StreamState foreign import capi unsafe "zlib.h inflateReset" c_inflateReset :: StreamState -> IO CInt foreign import capi unsafe "zlib.h deflateSetDictionary" c_deflateSetDictionary :: StreamState -> Ptr CUChar -> CUInt -> IO CInt foreign import capi unsafe "zlib.h inflateSetDictionary" c_inflateSetDictionary :: StreamState -> Ptr CUChar -> CUInt -> IO CInt foreign import capi SAFTY "zlib.h deflate" c_deflate :: StreamState -> CInt -> IO CInt foreign import capi unsafe "hs-zlib.h &_hs_zlib_deflateEnd" c_deflateEnd :: FinalizerPtr StreamState #if MIN_VERSION_base(4,18,0) foreign import capi unsafe "zlib.h zlibVersion" c_zlibVersion :: IO (ConstPtr CChar) #else foreign import ccall unsafe "zlib.h zlibVersion" c_zlibVersion :: IO (Ptr CChar) #endif foreign import capi unsafe "zlib.h adler32" c_adler32 :: CULong -> Ptr CUChar -> CUInt -> IO CULong zlib-0.7.1.1/LICENSE0000644000000000000000000000250407346545000012015 0ustar0000000000000000Copyright (c) 2006-2016, Duncan Coutts All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. This clause is intentionally left blank. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. zlib-0.7.1.1/README.md0000644000000000000000000000147407346545000012274 0ustar0000000000000000# zlib [![CI][CI badge]][CI page] [![Hackage][Hackage badge]][Hackage page] **Compression and decompression in the gzip and zlib format** This package provides a pure interface for compressing and decompressing streams of data represented as lazy `ByteString`s. It uses the `zlib` C library so it has high performance. It supports the `zlib`, `gzip` and `raw` compression formats. It provides a convenient high level API suitable for most tasks and for the few cases where more control is needed it provides access to the full zlib feature set. 
[CI badge]: https://github.com/haskell/zlib/actions/workflows/haskell-ci.yml/badge.svg [CI page]: https://github.com/haskell/zlib/actions/workflows/haskell-ci.yml [Hackage page]: https://hackage.haskell.org/package/zlib [Hackage badge]: https://img.shields.io/hackage/v/zlib.svg zlib-0.7.1.1/cbits-extra/0000755000000000000000000000000007346545000013234 5ustar0000000000000000zlib-0.7.1.1/cbits-extra/hs-zlib.c0000644000000000000000000000023107346545000014744 0ustar0000000000000000#include "hs-zlib.h" void _hs_zlib_inflateEnd(z_streamp strm) { inflateEnd(strm); } void _hs_zlib_deflateEnd(z_streamp strm) { deflateEnd(strm); } zlib-0.7.1.1/cbits-extra/hs-zlib.h0000644000000000000000000000023507346545000014755 0ustar0000000000000000#ifndef HS_ZLIB_EXTRAS #define HS_ZLIB_EXTRAS #include "zlib.h" void _hs_zlib_inflateEnd(z_streamp strm); void _hs_zlib_deflateEnd(z_streamp strm); #endif zlib-0.7.1.1/changelog.md0000644000000000000000000000664407346545000013272 0ustar0000000000000000See also http://pvp.haskell.org/faq 0.7.1.1 Bodigrim September 2025 * Allow newer versions of dependencies. * Fix new GHC warnings. 0.7.1.0 Bodigrim April 2024 * Split zlib C sources into `zlib-clib` package (thanks @hasufell). * Use zlib-clib on Windows, unless pkg-config is available. 0.7.0.0 Bodigrim February 2024 * Bump bundled `zlib` to 1.3.1. * Hide deprecated constructors of `CompressionLevel`, `Method`, `WindowBits`, `MemoryLevel`, `CompressionStrategy` and `Format`. * Make `WindowBits`, `MemoryLevel` and `CompressionLevel` newtypes over `Int`. * Add smart constructors `rleStrategy` and `fixedStrategy`. * Add assorted `Eq`, `Ord`, `Typeable` and `Generic` instances. * Make flag `pkg-config` automatic and on by default. * Make flag `bundled-c-zlib` to take priority over `pkg-config`. * Do not force `bundled-c-zlib` on Windows, but force it for WASM. * Strip `install-includes`, do not install any headers. * Export `DecompressError` from non-internal modules. 
* Fix compression/decompression of `ByteString` chunks > 4G. * Flip flag `non-blocking-ffi` to be `True` be default. 0.6.3.0 Bodigrim May 2022 * Bump bundled zlib to 1.2.12, #48 * Support base-4.17 0.6.2.3 Emily Pillmore February 2021 * Add support for bytestring-0.11.0.0 0.6.2.2 Julian Ospald August 2020 * Bump bundled zlib to 1.2.11, fixes #26 * New build flag to force use of the bundled zlib C sources, #31 * Simpler build support for ghcjs, #25 * Add support for GHC 8.10 / base-4.14, #29 0.6.2.1 Herbert Valerio Riedel August 2019 * Add support for GHC 8.8 / base-4.13 0.6.2 Herbert Valerio Riedel March 2018 * New cabal flag 'pkg-config' for discovering 'zlib` via pkg-config(1) (#16) * Use CApiFFI where available for cross-compile friendliness (#14) * Change the window bits range from 8..15 to 9..15 (#11) 0.6.1.2 Herbert Valerio Riedel October 2016 * Fix a segfault when reading the stream multithreaded, #7 * New experimental cabal flag 'non-blocking-ffi' for 'safe' ffi calls 0.6.1.1 Duncan Coutts April 2015 * Fixed building with GHC 7.0 and 7.2 0.6.0.2 Duncan Coutts April 2015 * Fixed building with GHC 7.0 and 7.2 0.6.1.0 Duncan Coutts April 2015 * Support for concatenated gzip files (multiple back-to-back streams) 0.6.0.1 Duncan Coutts April 2015 * Fixed building with older GHC * Fixed warnings with new GHC * Fixed building on Windows * Fixed testsuite 0.6.0.0 Duncan Coutts April 2015 * New incremental interface for compression and decompression * Provide access to unconsumed trailing data * Simplified structured error type, and instance of Exception * Updated bundled zlib C code to 1.2.8 (used on Windows) * Fixed memory leak of zlib z_stream C structure * More derived instances (Eq, Show, Typeable, Generic) 0.5.4.2 Duncan Coutts November 2014 * Builds with GHC 7.10 0.5.4.1 Duncan Coutts February 2013 * Force tail of input when finished decompressing, to help lazy IO 0.5.4.0 Duncan Coutts September 2012 * New support for zlib custom dictionaries 
zlib-0.7.1.1/examples/0000755000000000000000000000000007346545000012625 5ustar0000000000000000zlib-0.7.1.1/examples/gunzip.hs0000644000000000000000000000022107346545000014470 0ustar0000000000000000module Main where import qualified Data.ByteString.Lazy as B import qualified Codec.Compression.GZip as GZip main = B.interact GZip.decompress zlib-0.7.1.1/examples/gzip.hs0000644000000000000000000000041007346545000014125 0ustar0000000000000000module Main where import qualified Data.ByteString.Lazy as B import qualified Codec.Compression.GZip as GZip main = B.interact $ GZip.compressWith GZip.defaultCompressParams { GZip.compressLevel = GZip.BestCompression } zlib-0.7.1.1/test/0000755000000000000000000000000007346545000011766 5ustar0000000000000000zlib-0.7.1.1/test/Test.hs0000644000000000000000000003470507346545000013252 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE RankNTypes #-} module Main where import Codec.Compression.Zlib.Internal import qualified Codec.Compression.Zlib as Zlib import qualified Codec.Compression.GZip as GZip import qualified Codec.Compression.Zlib.Raw as Raw import Test.Codec.Compression.Zlib.Internal () import Test.Codec.Compression.Zlib.Stream () import Test.Tasty import Test.Tasty.QuickCheck import Utils () import Control.Monad import Control.Monad.ST.Lazy (ST) import Control.Exception import Data.Bits (finiteBitSize, shiftL) import qualified Data.ByteString.Char8 as BS.Char8 import qualified Data.ByteString.Lazy as BL import qualified Data.ByteString as BS #if !MIN_VERSION_bytestring(0,11,0) import qualified Data.ByteString.Internal as BS #endif import System.IO main :: IO () main = defaultMain $ testGroup "zlib tests" [ testGroup "property tests" [ testProperty "decompress . compress = id (standard)" prop_decompress_after_compress, testProperty "decompress . compress = id (Zlib -> GZipOrZLib)" prop_gziporzlib1, testProperty "decompress . 
compress = id (GZip -> GZipOrZlib)" prop_gziporzlib2, testProperty "concatenated gzip members" prop_gzip_concat, testProperty "multiple gzip members, boundaries (all 2-chunks)" prop_multiple_members_boundary2, testProperty "multiple gzip members, boundaries (all 3-chunks)" prop_multiple_members_boundary3, testProperty "prefixes of valid stream detected as truncated" prop_truncated, testProperty "compress works with BSes with non-zero offset" prop_compress_nonzero_bs_offset ], testGroup "unit tests" [ testProperty "simple gzip case" $ once test_simple_gzip, testProperty "detect bad CRC" $ once test_bad_crc, testProperty "detect non-gzip" $ once test_non_gzip, testProperty "detect custom dictionary" $ once test_custom_dict, testProperty "detect inflate with wrong dict" $ once test_wrong_dictionary, testProperty "detect inflate with right dict" $ once test_right_dictionary, testProperty "handle trailing data" $ once test_trailing_data, testProperty "multiple gzip members" $ once test_multiple_members, testProperty "check small input chunks" $ once test_small_chunks, testProperty "check empty input" $ once test_empty, testProperty "check exception raised" $ once test_exception, testProperty "check compress large chunk" $ once test_compress_large_chunk ] ] prop_decompress_after_compress :: Format -> CompressParams -> DecompressParams -> Property prop_decompress_after_compress w cp dp = (w /= zlibFormat || decompressWindowBits dp >= compressWindowBits cp) && (decompressWindowBits dp > compressWindowBits cp) && decompressBufferSize dp > 0 && compressBufferSize cp > 0 ==> liftM2 (==) (decompress w dp . compress w cp) id prop_gziporzlib1 :: CompressParams -> DecompressParams -> Property prop_gziporzlib1 cp dp = decompressWindowBits dp > compressWindowBits cp && decompressBufferSize dp > 0 && compressBufferSize cp > 0 ==> liftM2 (==) (decompress gzipOrZlibFormat dp . 
compress zlibFormat cp) id prop_gziporzlib2 :: CompressParams -> DecompressParams -> Property prop_gziporzlib2 cp dp = decompressWindowBits dp >= compressWindowBits cp && decompressBufferSize dp > 0 && compressBufferSize cp > 0 ==> liftM2 (==) (decompress gzipOrZlibFormat dp . compress gzipFormat cp) id prop_gzip_concat :: CompressParams -> DecompressParams -> BL.ByteString -> Property prop_gzip_concat cp dp input = decompressWindowBits dp >= compressWindowBits cp && decompressBufferSize dp > 0 && compressBufferSize cp > 0 ==> let catComp = BL.concat (replicate 5 (compress gzipFormat cp input)) compCat = compress gzipFormat cp (BL.concat (replicate 5 input)) in decompress gzipFormat dp { decompressAllMembers = True } catComp == decompress gzipFormat dp { decompressAllMembers = True } compCat prop_multiple_members_boundary2 :: Property prop_multiple_members_boundary2 = forAll shortStrings $ \bs -> all (\c -> decomp c == BL.append bs bs) (twoChunkSplits (comp bs `BL.append` comp bs)) where comp = compress gzipFormat defaultCompressParams decomp = decompress gzipFormat defaultDecompressParams shortStrings = fmap BL.pack $ listOf arbitrary prop_multiple_members_boundary3 :: Property prop_multiple_members_boundary3 = forAll shortStrings $ \bs -> all (\c -> decomp c == BL.append bs bs) (threeChunkSplits (comp bs `BL.append` comp bs)) where comp = compress gzipFormat defaultCompressParams decomp = decompress gzipFormat defaultDecompressParams shortStrings = sized $ \sz -> resize (sz `div` 10) $ fmap BL.pack $ listOf arbitrary prop_truncated :: Format -> Property prop_truncated format = forAll shortStrings $ \bs -> all (truncated decomp) (init (BL.inits (comp bs))) -- All the initial prefixes of a valid compressed stream should be detected -- as truncated. where comp = compress format defaultCompressParams decomp = decompressST format defaultDecompressParams truncated :: (forall s. 
DecompressStream (ST s)) -> BL.ByteString -> Bool truncated = foldDecompressStreamWithInput (\_ r -> r) (\_ -> False) (\err -> case err of TruncatedInput -> True; _ -> False) shortStrings = sized $ \sz -> resize (sz `div` 6) arbitrary prop_compress_nonzero_bs_offset :: BS.ByteString -> Int -> Property prop_compress_nonzero_bs_offset original to_drop = to_drop > 0 && BS.length original > to_drop ==> let input = BS.drop to_drop original #if MIN_VERSION_bytestring(0,11,0) dropped = to_drop #else (BS.PS _ptr dropped _length) = input #endif input' = BL.pack $ BS.unpack input -- BL.fromStrict is only available since bytestring-0.10.4.0 compressed = compress gzipFormat defaultCompressParams input' decompressed = decompress gzipFormat defaultDecompressParams compressed in dropped == to_drop && decompressed == input' test_simple_gzip :: Property test_simple_gzip = ioProperty $ withSampleData "hello.gz" $ \hnd -> let decomp = decompressIO gzipFormat defaultDecompressParams in assertDecompressOk hnd decomp test_bad_crc :: Property test_bad_crc = ioProperty $ withSampleData "bad-crc.gz" $ \hnd -> do let decomp = decompressIO gzipFormat defaultDecompressParams assertDecompressError hnd (assertDataFormatError "incorrect data check") decomp test_non_gzip :: Property test_non_gzip = conjoin [ ioProperty $ withSampleData "not-gzip" $ \hnd -> do let decomp = decompressIO gzipFormat defaultDecompressParams assertDecompressError hnd (assertDataFormatError "incorrect header check") decomp , ioProperty $ withSampleData "not-gzip" $ \hnd -> do let decomp = decompressIO zlibFormat defaultDecompressParams assertDecompressError hnd (assertDataFormatError "incorrect header check") decomp , ioProperty $ withSampleData "not-gzip" $ \hnd -> do let decomp = decompressIO rawFormat defaultDecompressParams checkError err = disjoin -- The majority of platforms throw this: [ assertDataFormatError "invalid code lengths set" err -- But on z15+ mainframes zlib employs CPU instruction DFLTCC, -- which 
returns error code with the same meaning. -- See http://publibfp.dhe.ibm.com/epubs/pdf/a227832c.pdf, page 26-37 -- and https://github.com/haskell/zlib/issues/46 , assertDataFormatError "Operation-Ending-Supplemental Code is 0x27" err ] assertDecompressError hnd checkError decomp , ioProperty $ withSampleData "not-gzip" $ \hnd -> do let decomp = decompressIO gzipOrZlibFormat defaultDecompressParams assertDecompressError hnd (assertDataFormatError "incorrect header check") decomp ] test_custom_dict :: Property test_custom_dict = ioProperty $ withSampleData "custom-dict.zlib" $ \hnd -> do let decomp = decompressIO zlibFormat defaultDecompressParams assertDecompressError hnd (=== DictionaryRequired) decomp test_wrong_dictionary :: Property test_wrong_dictionary = ioProperty $ withSampleData "custom-dict.zlib" $ \hnd -> do let decomp = decompressIO zlibFormat defaultDecompressParams { decompressDictionary = -- wrong dict! Just (BS.pack [65,66,67]) } assertDecompressError hnd (=== DictionaryMismatch) decomp test_right_dictionary :: Property test_right_dictionary = ioProperty $ withSampleData "custom-dict.zlib" $ \hnd -> do dict <- readSampleData "custom-dict.zlib-dict" let decomp = decompressIO zlibFormat defaultDecompressParams { decompressDictionary = Just (toStrict dict) } assertDecompressOk hnd decomp test_trailing_data :: Property test_trailing_data = ioProperty $ withSampleData "two-files.gz" $ \hnd -> do let decomp = decompressIO gzipFormat defaultDecompressParams { decompressAllMembers = False } checkChunks chunks = case chunks of [chunk] -> chunk === BS.Char8.pack "Test 1" _ -> counterexample "expected single chunk" False assertDecompressOkChunks hnd checkChunks decomp test_multiple_members :: Property test_multiple_members = ioProperty $ withSampleData "two-files.gz" $ \hnd -> do let decomp = decompressIO gzipFormat defaultDecompressParams { decompressAllMembers = True } checkChunks chunks = case chunks of [chunk1, chunk2] -> chunk1 === BS.Char8.pack "Test 1" 
.&&. chunk2 === BS.Char8.pack "Test 2" _ -> counterexample "expected two chunks" False assertDecompressOkChunks hnd checkChunks decomp test_small_chunks :: Property test_small_chunks = ioProperty $ do uncompressedFile <- readSampleData "not-gzip" compressedFile <- readSampleData "hello.gz" return $ conjoin [ GZip.compress (smallChunks uncompressedFile) === GZip.compress uncompressedFile , Zlib.compress (smallChunks uncompressedFile) === Zlib.compress uncompressedFile , Raw.compress (smallChunks uncompressedFile) === Raw.compress uncompressedFile , GZip.decompress (smallChunks (GZip.compress uncompressedFile)) === uncompressedFile , Zlib.decompress (smallChunks (Zlib.compress uncompressedFile)) === uncompressedFile , Raw.decompress (smallChunks (Raw.compress uncompressedFile)) === uncompressedFile , (GZip.decompress . smallChunks) compressedFile === GZip.decompress compressedFile ] test_empty :: Property test_empty = ioProperty $ do -- Regression test to make sure we only ask for input once in the case of -- initially empty input. We previously asked for input twice before -- returning the error. 
let decomp = decompressIO zlibFormat defaultDecompressParams case decomp of DecompressInputRequired next -> do decomp' <- next BS.empty case decomp' of DecompressStreamError TruncatedInput -> return $ property True _ -> return $ counterexample "expected truncated error" False _ -> return $ counterexample "expected input" False test_exception :: Property test_exception = ioProperty $ do compressedFile <- readSampleData "bad-crc.gz" len <- try (evaluate (BL.length (GZip.decompress compressedFile))) return $ case len of Left err -> assertDataFormatError "incorrect data check" err Right{} -> counterexample "expected exception" False test_compress_large_chunk :: Property test_compress_large_chunk = counterexample ("Expected " ++ show len ++ " zeros but got different result, please investigate manually") (property test) where test = GZip.decompress (GZip.compress (BL.replicate len 0)) == BL.replicate len 0 len = case finiteBitSize (0 :: Int) of 64 -> (1 `shiftL` 32) + 1 _ -> 0 -- ignore it toStrict :: BL.ByteString -> BS.ByteString #if MIN_VERSION_bytestring(0,10,0) toStrict = BL.toStrict #else toStrict = BS.concat . BL.toChunks #endif ----------------------- -- Chunk boundary utils smallChunks :: BL.ByteString -> BL.ByteString smallChunks = BL.fromChunks . map (\c -> BS.pack [c]) . 
BL.unpack twoChunkSplits :: BL.ByteString -> [BL.ByteString] twoChunkSplits bs = zipWith (\a b -> BL.fromChunks [a,b]) (BS.inits sbs) (BS.tails sbs) where sbs = toStrict bs threeChunkSplits :: BL.ByteString -> [BL.ByteString] threeChunkSplits bs = [ BL.fromChunks [a,b,c] | (a,x) <- zip (BS.inits sbs) (BS.tails sbs) , (b,c) <- zip (BS.inits x) (BS.tails x) ] where sbs = toStrict bs -------------- -- HUnit Utils readSampleData :: FilePath -> IO BL.ByteString readSampleData file = BL.readFile ("test/data/" ++ file) withSampleData :: FilePath -> (Handle -> IO a) -> IO a withSampleData file = withFile ("test/data/" ++ file) ReadMode expected :: String -> String -> Property expected e g = counterexample ("expected: " ++ e ++ "\nbut got: " ++ g) False assertDecompressOk :: Handle -> DecompressStream IO -> IO Property assertDecompressOk hnd = foldDecompressStream (BS.hGet hnd 4000 >>=) (\_ r -> r) (\_ -> return $ property True) (\err -> return $ expected "decompress ok" (show err)) assertDecompressOkChunks :: Handle -> ([BS.ByteString] -> Property) -> DecompressStream IO -> IO Property assertDecompressOkChunks hnd callback = fmap (either id callback) . foldDecompressStream (BS.hGet hnd 4000 >>=) (\chunk -> liftM (liftM (chunk:))) (\_ -> return $ Right []) (\err -> return $ Left $ expected "decompress ok" (show err)) assertDecompressError :: Handle -> (DecompressError -> Property) -> DecompressStream IO -> IO Property assertDecompressError hnd callback = foldDecompressStream (BS.hGet hnd 4000 >>=) (\_ r -> r) (\_ -> return $ expected "StreamError" "StreamEnd") (return . 
callback) assertDataFormatError :: String -> DecompressError -> Property assertDataFormatError expect (DataFormatError actual) = expect === actual assertDataFormatError _ _ = counterexample "expected DataError" False zlib-0.7.1.1/test/Test/Codec/Compression/Zlib/0000755000000000000000000000000007346545000017123 5ustar0000000000000000zlib-0.7.1.1/test/Test/Codec/Compression/Zlib/Internal.hs0000644000000000000000000000226107346545000021234 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-orphans #-} -- | Test code and properties for "Codec.Compression.Zlib.Internal" -- module Test.Codec.Compression.Zlib.Internal where import Codec.Compression.Zlib.Internal import Test.Codec.Compression.Zlib.Stream () import Test.QuickCheck import Control.Monad (ap) instance Arbitrary CompressParams where arbitrary = return CompressParams `ap` arbitrary `ap` arbitrary `ap` arbitrary `ap` arbitrary `ap` arbitrary `ap` arbitraryBufferSize `ap` return Nothing arbitraryBufferSize :: Gen Int arbitraryBufferSize = frequency $ [(10, return n) | n <- [1..1024]] ++ [(20, return n) | n <- [1025..8192]] ++ [(40, return n) | n <- [8193..131072]] ++ [(1, return n) | n <- [131072..1048576]] instance Arbitrary DecompressParams where arbitrary = return DecompressParams `ap` arbitrary `ap` arbitraryBufferSize `ap` return Nothing `ap` arbitrary zlib-0.7.1.1/test/Test/Codec/Compression/Zlib/Stream.hs0000644000000000000000000000206507346545000020715 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-orphans #-} -- | Test code and properties for "Codec.Compression.Zlib.Stream" -- module Test.Codec.Compression.Zlib.Stream where import Codec.Compression.Zlib.Internal import Test.QuickCheck instance Arbitrary Format where -- GZipOrZlib omitted since it's not symmetric arbitrary = elements [gzipFormat, zlibFormat, rawFormat] instance Arbitrary Method where arbitrary = return deflateMethod instance Arbitrary CompressionLevel where arbitrary = elements $ [defaultCompression, noCompression, bestCompression, 
bestSpeed] ++ map compressionLevel [1..9] instance Arbitrary WindowBits where arbitrary = elements $ defaultWindowBits:map windowBits [9..15] instance Arbitrary MemoryLevel where arbitrary = elements $ [defaultMemoryLevel, minMemoryLevel, maxMemoryLevel] ++ [memoryLevel n | n <- [1..9]] instance Arbitrary CompressionStrategy where arbitrary = elements $ [defaultStrategy, filteredStrategy, huffmanOnlyStrategy, rleStrategy, fixedStrategy] zlib-0.7.1.1/test/Utils.hs0000644000000000000000000000144007346545000013421 0ustar0000000000000000{-# OPTIONS_GHC -fno-warn-orphans #-} module Utils where import qualified Data.ByteString.Lazy as BL import qualified Data.ByteString as BS import Test.QuickCheck ------------------- -- QuickCheck Utils maxStrSize :: Double maxStrSize = 500 -- convert a QC size parameter into one for generating long lists, -- growing inverse exponentially up to maxStrSize strSize :: Int -> Int strSize n = floor (maxStrSize * (1 - 2 ** (-fromIntegral n/100))) instance Arbitrary BL.ByteString where arbitrary = sized $ \sz -> fmap BL.fromChunks $ listOf $ resize (sz `div` 2) arbitrary shrink = map BL.pack . shrink . BL.unpack instance Arbitrary BS.ByteString where arbitrary = sized $ \sz -> resize (strSize sz) $ fmap BS.pack $ listOf $ arbitrary shrink = map BS.pack . shrink . BS.unpack zlib-0.7.1.1/test/data/0000755000000000000000000000000007346545000012677 5ustar0000000000000000zlib-0.7.1.1/test/data/bad-crc.gz0000644000000000000000000000151707346545000014540 0ustar0000000000000000Umo0_qh)}oZ' Pۄ^S1MwIJ[>s'Wby5v cAԁυ!A rԢ@(̼RL+ L彃y~ȅ[Rc/^Foǣѻћ~WTq!G+2V@x`n)XbstFCDVZsj$>e.H{P]B]PUQ(**SٔPBA%B ZB7?˥epV ;,%. 
hEFDK|$ )J*Ja Y 6A`||Kximjw@Gs4"VefJTZ(KQ=SS@!ʏJURͿY7ʛ%($!zc!m|,ctBnZӚ^i{G-k?rN=Hf9swa*a\;z3"lP8Aʇs|"1aZˁ7Jwd@,r$7Yے˜셮nhGFg_Ie `uZs|%fV5pd1ӆUhϠe|Du/l*-0.*jY.Z=EBM6]lpH_fĻ-ݛCx: IQv#)tf0m|hyw[k$k,.wIl^r?ʋt5/9Sܒ`آ8!xc͒`Cz&ݍOmQ1 mhtX4zlib-0.7.1.1/test/data/custom-dict.zlib0000644000000000000000000000001707346545000016012 0ustar0000000000000000x  ]zlib-0.7.1.1/test/data/custom-dict.zlib-dict0000644000000000000000000000000707346545000016732 0ustar0000000000000000Haskellzlib-0.7.1.1/test/data/hello.gz0000644000000000000000000000004707346545000014345 0ustar0000000000000000?JhelloHW/IQ} zlib-0.7.1.1/test/data/not-gzip0000644000000000000000000000003107346545000014363 0ustar0000000000000000This is not a gzip file! zlib-0.7.1.1/test/data/two-files.gz0000644000000000000000000000006707346545000015155 0ustar0000000000000000A I-.Q0'AAt2 I-.Q0.zlib-0.7.1.1/zlib.cabal0000644000000000000000000001212607346545000012735 0ustar0000000000000000cabal-version: >= 1.10 name: zlib version: 0.7.1.1 copyright: (c) 2006-2016 Duncan Coutts license: BSD3 license-file: LICENSE author: Duncan Coutts maintainer: Duncan Coutts , Andrew Lelechenko , Emily Pillmore , Herbert Valerio Riedel bug-reports: https://github.com/haskell/zlib/issues category: Codec synopsis: Compression and decompression in the gzip and zlib formats description: This package provides a pure interface for compressing and decompressing streams of data represented as lazy 'ByteString's. It uses the so it has high performance. It supports the \"zlib\", \"gzip\" and \"raw\" compression formats. . It provides a convenient high level API suitable for most tasks and for the few cases where more control is needed it provides access to the full zlib feature set. 
build-type: Simple tested-with: GHC == 8.0.2 , GHC == 8.2.2 , GHC == 8.4.4 , GHC == 8.6.5 , GHC == 8.8.4 , GHC == 8.10.7 , GHC == 9.0.2 , GHC == 9.2.8 , GHC == 9.4.8 , GHC == 9.6.7 , GHC == 9.8.4 , GHC == 9.10.2 , GHC == 9.12.2 , GHC == 9.14.1 extra-source-files: changelog.md README.md -- extra headers cbits-extra/hs-zlib.h -- test data files test/data/bad-crc.gz test/data/custom-dict.zlib test/data/custom-dict.zlib-dict test/data/hello.gz test/data/not-gzip test/data/two-files.gz -- demo programs: examples/gzip.hs examples/gunzip.hs source-repository head type: git location: https://github.com/haskell/zlib.git flag non-blocking-ffi default: True manual: True description: The (de)compression calls can sometimes take a long time, which prevents other Haskell threads running. Enabling this flag avoids this unfairness, but with greater overall cost. flag pkg-config default: True manual: False description: Use @pkg-config@ executable to locate foreign @zlib@ library. flag bundled-c-zlib default: False manual: True description: Use @zlib-clib@ package with C sources instead of a system library. C sources are used for GHCJS and WASM unconditionally and on Windows unless @pkg-config@ flag is on. 
library exposed-modules: Codec.Compression.GZip, Codec.Compression.Zlib, Codec.Compression.Zlib.Raw, Codec.Compression.Zlib.Internal other-modules: Codec.Compression.Zlib.Stream, Codec.Compression.Zlib.ByteStringCompat default-language: Haskell2010 other-extensions: CPP, ForeignFunctionInterface, RankNTypes, BangPatterns other-extensions: DeriveGeneric other-extensions: CApiFFI build-depends: base >= 4.9 && < 4.23, bytestring >= 0.9 && < 0.13 build-tools: hsc2hs >= 0.67 && < 0.69 if os(windows) && impl(ghc < 8.4) build-tools: hsc2hs < 0.68.5 -- GHC 7 ships hsc2hs-0.67 include-dirs: cbits-extra c-sources: cbits-extra/hs-zlib.c ghc-options: -Wall -fwarn-tabs if flag(non-blocking-ffi) cpp-options: -DNON_BLOCKING_FFI -- Cross-platform builds (such as JS and WASM) must have access -- to C sources, so using zlib-clib unconditionally. -- -- On Windows, zlib is shipped as part of GHC's mingw/lib directory, -- which GHC always includes in its linker search path. However, -- there is no guarantee that zlib1.dll (the corresponding shared library) -- will be available on the user's PATH at runtime, making it risky to depend upon -- (see https://github.com/haskell/zlib/issues/65 for what can go wrong). -- Thus, we resort to zlib-clib unless pkg-config is available. if flag(bundled-c-zlib) || impl(ghcjs) || os(ghcjs) || arch(wasm32) || (!flag(pkg-config) && os(windows)) build-depends: zlib-clib < 2 else if flag(pkg-config) -- NB: pkg-config is available on windows as well when using msys2 pkgconfig-depends: zlib else extra-libraries: z test-suite tests type: exitcode-stdio-1.0 main-is: Test.hs other-modules: Utils, Test.Codec.Compression.Zlib.Internal, Test.Codec.Compression.Zlib.Stream hs-source-dirs: test default-language: Haskell2010 build-depends: base, bytestring, zlib, QuickCheck == 2.*, tasty >= 0.8 && < 1.6, tasty-quickcheck >= 0.8 && < 1 ghc-options: -Wall if impl(ghc >= 9.4) ghc-options: "-with-rtsopts=-M1G"