[
  {
    "path": ".gitattributes",
    "content": "/assets/* -text -crlf\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "# Dependabot dependency version checks / updates\n\nversion: 2\nupdates:\n  - package-ecosystem: \"github-actions\"\n    # Workflow files stored in the\n    # default location of `.github/workflows`\n    directory: \"/\"\n    schedule:\n      interval: \"daily\"\n  - package-ecosystem: \"cargo\"\n    directory: \"/\"\n    schedule:\n      interval: \"daily\"\n  - package-ecosystem: \"cargo\"\n    directory: \"/zstd-safe\"\n    schedule:\n      interval: \"daily\"\n  - package-ecosystem: \"cargo\"\n    directory: \"/zstd-safe/zstd-sys\"\n    schedule:\n      interval: \"daily\"\n"
  },
  {
    "path": ".github/workflows/linux.yml",
    "content": "name: Linux\n\non:\n  push:\n    branches: [ main ]\n  pull_request:\n    branches: [ main ]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build:\n\n    runs-on: self-hosted\n\n    steps:\n    - uses: actions/checkout@v6\n      with:\n        submodules: recursive\n    - name: Build\n      run: cargo build --verbose\n    - name: Run tests\n      run: cargo test --verbose\n\n    - name: Build with feature thin\n      run: cargo build --verbose --features thin\n    - name: Run tests\n      run: cargo test --verbose --features thin\n\n    - name: Build zstd-safe with feature seekable\n      run: cargo build --manifest-path zstd-safe/Cargo.toml --verbose --features seekable\n    - name: Run zstd-safe tests with feature seekable\n      run: cargo test --manifest-path zstd-safe/Cargo.toml --verbose --features seekable\n    - name: Build zstd-safe with features std and seekable\n      run: cargo build --manifest-path zstd-safe/Cargo.toml --verbose --features std,seekable\n    - name: Run zstd-safe tests with features std and seekable\n      run: cargo test --manifest-path zstd-safe/Cargo.toml --verbose --features std,seekable\n"
  },
  {
    "path": ".github/workflows/macos.yml",
    "content": "name: macOS\n\non:\n  push:\n    branches: [ main ]\n  pull_request:\n    branches: [ main ]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build:\n\n    runs-on: macos-latest\n\n    steps:\n    - uses: actions/checkout@v6\n      with:\n        submodules: recursive\n    - name: Build\n      run: cargo build --verbose\n    - name: Run tests\n      run: cargo test --verbose\n\n"
  },
  {
    "path": ".github/workflows/wasm.yml",
    "content": "name: Wasm\n\non:\n  push:\n    branches: [ main ]\n  pull_request:\n    branches: [ main ]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build:\n\n    runs-on: self-hosted\n\n    steps:\n    - name: Wasm target\n      run: rustup target add wasm32-unknown-unknown\n    - uses: actions/checkout@v6\n      with:\n        submodules: recursive\n\n    - name: Build\n      run: cargo build --verbose --target wasm32-unknown-unknown\n    - name: Build with feature thin\n      run: cargo build --verbose --features thin --target wasm32-unknown-unknown\n"
  },
  {
    "path": ".github/workflows/windows.yml",
    "content": "name: Windows\n\non:\n  push:\n    branches: [ main ]\n  pull_request:\n    branches: [ main ]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build:\n\n    strategy:\n      matrix:\n        target:\n          #- i686-pc-windows-gnu\n          - i686-pc-windows-msvc\n          #- x86_64-pc-windows-gnu\n          - x86_64-pc-windows-msvc\n        channel: [ stable ]\n\n    runs-on: windows-latest\n\n    steps:\n    - uses: actions/checkout@v6\n      with:\n        submodules: recursive\n    - name: setup\n      uses: dtolnay/rust-toolchain@master\n      with:\n        toolchain: ${{ matrix.channel }}-${{ matrix.target }}\n        targets: ${{ matrix.target }}\n\n    - name: Add mingw32 to path for i686-gnu\n      run: |\n        echo \"C:\\msys64\\mingw32\\bin\" >> $GITHUB_PATH\n        echo \"C:\\msys64\\usr\\bin\" >> $GITHUB_PATH\n      if: matrix.target == 'i686-pc-windows-gnu'\n      shell: bash\n    - name: Add mingw64 to path for x86_64-gnu\n      run: |\n        echo \"C:\\msys64\\mingw64\\bin\" >> $GITHUB_PATH\n        echo \"C:\\msys64\\usr\\bin\" >> $GITHUB_PATH\n      if: matrix.target == 'x86_64-pc-windows-gnu'\n      shell: bash\n    - name: Update gcc\n      if: matrix.target == 'x86_64-pc-windows-gnu'\n      run: pacman.exe -Sy --noconfirm mingw-w64-x86_64-toolchain\n    - name: Update gcc\n      if: matrix.target == 'i686-pc-windows-gnu'\n      run: pacman.exe -Sy --noconfirm mingw-w64-i686-toolchain\n\n    - name: Build\n      run: cargo build --verbose --verbose\n    - name: Run tests\n      run: cargo test --verbose\n"
  },
  {
    "path": ".gitignore",
    "content": "target\nCargo.lock\n/silesia\n/silesia.zip\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"zstd-safe/zstd-sys/zstd\"]\n\tpath = zstd-safe/zstd-sys/zstd\n\turl = https://github.com/facebook/zstd\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[package]\nauthors = [\"Alexandre Bury <alexandre.bury@gmail.com>\"]\ndescription = \"Binding for the zstd compression library.\"\ndocumentation = \"https://docs.rs/zstd\"\nkeywords = [\"zstd\", \"zstandard\", \"compression\"]\ncategories = [\"compression\", \"api-bindings\"]\nlicense = \"BSD-3-Clause\"\nname = \"zstd\"\nrepository = \"https://github.com/gyscos/zstd-rs\"\nversion = \"0.13.3\"\nexclude = [\"assets/*.zst\", \"/.github\"]\nreadme = \"Readme.md\"\nedition = \"2018\"\nrust-version = \"1.64\"\n\n[package.metadata.docs.rs]\nfeatures = [\"experimental\", \"zstdmt\", \"zdict_builder\", \"doc-cfg\"]\n\n[badges]\ntravis-ci = { repository = \"gyscos/zstd-rs\" }\n\n[dependencies]\nzstd-safe = { path = \"zstd-safe\", version = \"7.1.0\", default-features = false, features = [\"std\"] }\n\n[dev-dependencies]\nclap = {version = \"4.0\", features=[\"derive\"]}\nhumansize = \"2.0\"\npartial-io = \"0.5\"\nwalkdir = \"2.2\"\n\n[features]\ndefault = [\"legacy\", \"arrays\", \"zdict_builder\"]\n\nbindgen = [\"zstd-safe/bindgen\"]\ndebug = [\"zstd-safe/debug\"]\nlegacy = [\"zstd-safe/legacy\"]\npkg-config = [\"zstd-safe/pkg-config\"]\nwasm = []\nzstdmt = [\"zstd-safe/zstdmt\"]\nexperimental = [\"zstd-safe/experimental\"]\nthin = [\"zstd-safe/thin\"]\narrays = [\"zstd-safe/arrays\"]\nno_asm = [\"zstd-safe/no_asm\"]\ndoc-cfg = []\nzdict_builder = [\"zstd-safe/zdict_builder\"]\n\n# These two are for cross-language LTO.\n# Will only work if `clang` is used to build the C library.\nfat-lto = [\"zstd-safe/fat-lto\"]\nthin-lto = [\"zstd-safe/thin-lto\"]\n\n[[example]]\nname = \"train\"\nrequired-features = [\"zdict_builder\"]\n"
  },
  {
    "path": "LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2026, Alexandre Bury\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n   contributors may be used to endorse or promote products derived from\n   this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "Readme.md",
    "content": "# zstd\n\n[![crates.io](https://img.shields.io/crates/v/zstd.svg)](https://crates.io/crates/zstd)\n[![BSD-3-Clause licensed](https://img.shields.io/badge/license-BSD--3--Clause-blue.svg)](./LICENSE)\n\n[![Build on Linux](https://github.com/gyscos/zstd-rs/actions/workflows/linux.yml/badge.svg)](https://github.com/gyscos/zstd-rs/actions/workflows/linux.yml)\n[![Build on Windows](https://github.com/gyscos/zstd-rs/actions/workflows/windows.yml/badge.svg)](https://github.com/gyscos/zstd-rs/actions/workflows/windows.yml)\n[![Build on macOS](https://github.com/gyscos/zstd-rs/actions/workflows/macos.yml/badge.svg)](https://github.com/gyscos/zstd-rs/actions/workflows/macos.yml)\n[![Build on wasm](https://github.com/gyscos/zstd-rs/actions/workflows/wasm.yml/badge.svg)](https://github.com/gyscos/zstd-rs/actions/workflows/wasm.yml)\n\n\nThis library is a rust binding for the [zstd compression library][zstd].\n\n# [Documentation][doc]\n\n## 1 - Add to `cargo.toml`\n\n```bash\n$ cargo add zstd\n```\n\n```toml\n# Cargo.toml\n\n[dependencies]\nzstd = \"0.13\"\n```\n\n## 2 - Usage\n\nThis library provides `Read` and `Write` wrappers to handle (de)compression,\nalong with convenience functions to made common tasks easier.\n\nFor instance, `stream::copy_encode` and `stream::copy_decode` are easy-to-use\nwrappers around `std::io::copy`. 
Check the [stream] example:\n\n```rust\nuse std::io;\n\n// This function use the convenient `copy_encode` method\nfn compress(level: i32) {\n    zstd::stream::copy_encode(io::stdin(), io::stdout(), level).unwrap();\n}\n\n// This function does the same thing, directly using an `Encoder`:\nfn compress_manually(level: i32) {\n    let mut encoder = zstd::stream::Encoder::new(io::stdout(), level).unwrap();\n    io::copy(&mut io::stdin(), &mut encoder).unwrap();\n    encoder.finish().unwrap();\n}\n\nfn decompress() {\n    zstd::stream::copy_decode(io::stdin(), io::stdout()).unwrap();\n}\n```\n\n# Asynchronous support\n\nThe [`async-compression`](https://github.com/Nemo157/async-compression/) crate\nprovides an async-ready integration of various compression algorithms,\nincluding `zstd-rs`.\n\n# Compile it yourself\n\n`zstd` is included as a submodule. To get everything during your clone, use:\n\n```\ngit clone https://github.com/gyscos/zstd-rs --recursive\n```\n\nOr, if you cloned it without the `--recursive` flag,\ncall this from inside the repository:\n\n```\ngit submodule update --init\n```\n\nThen, running `cargo build` should take care\nof building the C library and linking to it.\n\n# Build-time bindgen\n\nThis library includes a pre-generated `bindings.rs` file.\nYou can also generate new bindings at build-time, using the `bindgen` feature:\n\n```\ncargo build --features bindgen\n```\n\n# TODO\n\n* Benchmarks, optimizations, ...\n\n# Disclaimer\n\nThis implementation is largely inspired by bozaro's [lz4-rs].\n\n# License\n\n* The zstd C library is under a dual BSD/GPLv2 license.\n* This zstd-rs binding library is under a [BSD-3-Clause](LICENSE) license.\n\n[zstd]: https://github.com/facebook/zstd\n[lz4-rs]: https://github.com/bozaro/lz4-rs\n[cargo-edit]: https://github.com/killercup/cargo-edit#cargo-add\n[doc]: https://docs.rs/zstd\n[stream]: examples/stream.rs\n[submodule]: https://git-scm.com/book/en/v2/Git-Tools-Submodules\n"
  },
  {
    "path": "assets/example.txt",
    "content": "’Twas brillig, and the slithy toves\nDid gyre and gimble in the wade;\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n\n\n\"Beware the Jabberwock, my son!\nThe jaws that bite, the claws that catch!\nBeware the Jubjub bird, and shun\nThe frumious Bandersnatch!\"\n\n\nHe took his vorpal sword in hand:\nLong time the manxome foe he sought—\nSo rested he by the Tumtum tree,\nAnd stood awhile in thought.\n\n\nAnd as in uffish thought he stood,\nThe Jabberwock, with eyes of flame,\nCame whiffling through the tulgey wood,\nAnd burbled as it came!\n\n\nOne, two! One, two! And through and through\nThe vorpal blade went snicker-snack!\nHe left it dead, and with its head\nHe went galumphing back.\n\n\n\"And hast thou slain the Jabberwock?\nCome to my arms, my beamish boy!\nO frabjous day! Callooh! Callay!\"\nHe chortled in his joy.\n\n\n’Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe;\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n"
  },
  {
    "path": "examples/basic.rs",
    "content": "fn main() {\n    let some_content = \"Something\";\n    let compression_level = 3;\n\n    // Compress some text\n    let compressed =\n        zstd::encode_all(some_content.as_bytes(), compression_level).unwrap();\n\n    // Now uncompress it\n    let decoded: Vec<u8> = zstd::decode_all(compressed.as_slice()).unwrap();\n\n    // Convert it to text\n    let decoded_text = std::str::from_utf8(&decoded).unwrap();\n\n    assert_eq!(some_content, decoded_text);\n}\n"
  },
  {
    "path": "examples/benchmark.rs",
    "content": "use clap::Parser;\nuse humansize::{format_size, DECIMAL};\nuse std::io::Read;\nuse std::path::PathBuf;\n\n#[derive(Parser, Debug)]\n#[command(author, version, about, long_about=None)]\nstruct Args {\n    /// Directory containing the data to compress.\n    /// To use the silesia corpus, run the following commands:\n    ///\n    /// ```\n    /// wget http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip\n    /// unzip silesia.zip -d silesia/\n    /// cargo run --example benchmark -- silesia/\",\n    /// ```\n    dir: PathBuf,\n\n    /// First compression level to test.\n    #[arg(short, long)]\n    begin: i32,\n\n    /// Last compression level to test.\n    #[arg(short, long)]\n    end: i32,\n}\n\nfn main() {\n    let args = Args::parse();\n\n    // Step 1: load data in memory\n    let files: Vec<Vec<u8>> = std::fs::read_dir(args.dir)\n        .unwrap()\n        .map(|file| {\n            let file = file.unwrap();\n\n            let mut content = Vec::new();\n            std::fs::File::open(file.path())\n                .unwrap()\n                .read_to_end(&mut content)\n                .unwrap();\n            content\n        })\n        .collect();\n\n    let total_size: usize = files.iter().map(|data| data.len()).sum();\n\n    // Step 3: compress data\n\n    // Print tsv headers\n    println!(\n        \"{}\\t{}\\t{}\\t{}\",\n        \"Compression level\",\n        \"Compression ratio\",\n        \"Compression speed\",\n        \"Decompression speed\"\n    );\n\n    for level in args.begin..args.end {\n        // Compress each sample sequentially.\n        let start = std::time::Instant::now();\n\n        let compressed: Vec<Vec<u8>> = files\n            .iter()\n            .map(|data| zstd::encode_all(&data[..], level).unwrap())\n            .collect();\n        let mid = std::time::Instant::now();\n\n        let uncompressed: Vec<Vec<u8>> = compressed\n            .iter()\n            .map(|data| zstd::decode_all(&data[..]).unwrap())\n            
.collect();\n        let end = std::time::Instant::now();\n\n        for (original, processed) in files.iter().zip(uncompressed.iter()) {\n            assert_eq!(&original[..], &processed[..]);\n        }\n\n        let compress_time = mid - start;\n        let decompress_time = end - mid;\n\n        let compress_seconds = compress_time.as_secs() as f64\n            + compress_time.subsec_nanos() as f64 * 1e-9;\n\n        let decompress_seconds = decompress_time.as_secs() as f64\n            + decompress_time.subsec_nanos() as f64 * 1e-9;\n\n        let compressed_size: usize = compressed.iter().map(Vec::len).sum();\n\n        let speed = (total_size as f64 / compress_seconds) as usize;\n        let speed = format_size(speed, DECIMAL);\n\n        let d_speed = (total_size as f64 / decompress_seconds) as usize;\n        let d_speed = format_size(d_speed, DECIMAL);\n\n        let ratio = compressed_size as f64 / total_size as f64;\n        println!(\"{}\\t{:.3}\\t{}/s\\t{}/s\", level, 1.0 / ratio, speed, d_speed);\n    }\n}\n"
  },
  {
    "path": "examples/stream.rs",
    "content": "use std::env;\nuse std::io::{self, Write};\nuse std::str::FromStr;\n\nfn main() {\n    match env::args().nth(1) {\n        None => {\n            writeln!(\n                &mut io::stderr(),\n                \"Invalid option. Usage: `stream [-d|-1..-22]`\"\n            )\n            .unwrap();\n        }\n        Some(ref option) if option == \"-d\" => decompress(),\n        Some(ref option) => {\n            if option.starts_with('-') {\n                let level = match i32::from_str(&option[1..]) {\n                    Ok(level) => level,\n                    Err(e) => panic!(\"Error parsing compression level: {}\", e),\n                };\n                compress(level);\n            } else {\n                writeln!(\n                    &mut io::stderr(),\n                    \"Invalid option. Usage: `stream [-d|-1..-22]`\"\n                )\n                .unwrap();\n            }\n        }\n    }\n}\n\nfn compress(level: i32) {\n    zstd::stream::copy_encode(io::stdin(), io::stdout(), level).unwrap();\n}\n\nfn decompress() {\n    zstd::stream::copy_decode(io::stdin(), io::stdout()).unwrap();\n}\n"
  },
  {
    "path": "examples/train.rs",
    "content": "use clap::Parser;\nuse std::io;\nuse std::path::PathBuf;\n\n#[derive(Parser, Debug)]\n#[command(author, version, about, long_about=None)]\n/// This program trains a dictionary from one or more files,\n/// to make future compression of similar small files more efficient.\n///\n/// The dictionary will need to be present during decompression,\n/// but if you need to compress many small files individually,\n/// it may be worth the trouble.\nstruct Args {\n    /// Maximum dictionary size in bytes.\n    #[arg(short, long)]\n    max_size: usize,\n\n    /// Files to use as input.\n    files: Vec<PathBuf>,\n}\n\nfn main() {\n    let args = Args::parse();\n\n    let dict = zstd::dict::from_files(&args.files, args.max_size).unwrap();\n\n    let mut dict_reader: &[u8] = &dict;\n    io::copy(&mut dict_reader, &mut io::stdout()).unwrap();\n}\n"
  },
  {
    "path": "examples/zstd.rs",
    "content": "use zstd;\n\nuse std::env;\nuse std::fs;\nuse std::io;\n\nconst SUFFIX: &'static str = \".zst\";\n\nfn main() {\n    for arg in env::args().skip(1) {\n        if arg.ends_with(SUFFIX) {\n            match decompress(&arg) {\n                Ok(()) => println!(\"Decompressed {}\", arg),\n                Err(e) => println!(\"Error decompressing {}: {}\", arg, e),\n            }\n        } else {\n            match compress(&arg) {\n                Ok(()) => println!(\"Compressed {}\", arg),\n                Err(e) => println!(\"Error compressing {}: {}\", arg, e),\n            }\n        }\n    }\n}\n\nfn compress(source: &str) -> io::Result<()> {\n    let mut file = fs::File::open(source)?;\n    let mut encoder = {\n        let target = fs::File::create(source.to_string() + SUFFIX)?;\n        zstd::Encoder::new(target, 1)?\n    };\n\n    io::copy(&mut file, &mut encoder)?;\n    encoder.finish()?;\n\n    Ok(())\n}\n\nfn decompress(source: &str) -> io::Result<()> {\n    let mut decoder = {\n        let file = fs::File::open(source)?;\n        zstd::Decoder::new(file)?\n    };\n\n    let mut target = fs::File::create(source.trim_end_matches(SUFFIX))?;\n\n    io::copy(&mut decoder, &mut target)?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "examples/zstdcat.rs",
    "content": "use clap::Parser;\nuse std::fs;\nuse std::io;\n\n#[derive(Parser, Debug)]\n#[command(author, version, about, long_about=None)]\nstruct Args {\n    /// Files to decompress. With no file, or when given -, read standard input.\n    file: Vec<String>,\n}\n\nfn main() {\n    // This will be a simple application:\n    // takes a single (repeatable and optional) argument.\n    let args = Args::parse();\n\n    // If nothing was given, act as if `-` was there.\n    if args.file.is_empty() {\n        decompress_file(\"-\").unwrap();\n    } else {\n        for file in &args.file {\n            decompress_file(file).unwrap();\n        }\n    }\n}\n\n// Dispatch the source reader depending on the filename\nfn decompress_file(file: &str) -> io::Result<()> {\n    match file {\n        \"-\" => decompress_from(io::stdin()),\n        other => decompress_from(io::BufReader::new(fs::File::open(other)?)),\n    }\n}\n\n// Decompress from a `Reader` into stdout\nfn decompress_from<R: io::Read>(r: R) -> io::Result<()> {\n    let mut decoder = zstd::Decoder::new(r)?;\n    io::copy(&mut decoder, &mut io::stdout())?;\n    Ok(())\n}\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "max_width = 79\nreorder_imports = true\nuse_try_shorthand = true\n"
  },
  {
    "path": "src/bulk/compressor.rs",
    "content": "use crate::map_error_code;\n\nuse std::io;\nuse zstd_safe;\n\n/// Allows to compress independently multiple chunks of data.\n///\n/// Each job will be processed entirely in-memory without streaming, so this\n/// is most fitting for many small jobs. To compress larger volume that don't\n/// easily fit in memory, a streaming compression may be more appropriate.\n///\n/// It is more efficient than a streaming compressor for 2 reasons:\n/// * It re-uses the zstd context between jobs to avoid re-allocations\n/// * It avoids copying data from a `Read` into a temporary buffer before compression.\n#[derive(Default)]\npub struct Compressor<'a> {\n    context: zstd_safe::CCtx<'a>,\n}\n\nimpl Compressor<'static> {\n    /// Creates a new zstd compressor\n    pub fn new(level: i32) -> io::Result<Self> {\n        Self::with_dictionary(level, &[])\n    }\n\n    /// Creates a new zstd compressor, using the given dictionary.\n    ///\n    /// Note that using a dictionary means that decompression will need to use\n    /// the same dictionary.\n    pub fn with_dictionary(level: i32, dictionary: &[u8]) -> io::Result<Self> {\n        let mut compressor = Self::default();\n\n        compressor.set_dictionary(level, dictionary)?;\n\n        Ok(compressor)\n    }\n}\n\nimpl<'a> Compressor<'a> {\n    /// Creates a new compressor using an existing `EncoderDictionary`.\n    ///\n    /// The compression level will be the one specified when creating the dictionary.\n    ///\n    /// Note that using a dictionary means that decompression will need to use\n    /// the same dictionary.\n    pub fn with_prepared_dictionary<'b>(\n        dictionary: &'a crate::dict::EncoderDictionary<'b>,\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let mut compressor = Self::default();\n\n        compressor.set_prepared_dictionary(dictionary)?;\n\n        Ok(compressor)\n    }\n\n    /// Changes the compression level used by this compressor.\n    ///\n    /// *This will 
clear any dictionary previously registered.*\n    ///\n    /// If you want to keep the existing dictionary, you will need to pass it again to\n    /// `Self::set_dictionary` instead of using this method.\n    pub fn set_compression_level(&mut self, level: i32) -> io::Result<()> {\n        self.set_dictionary(level, &[])\n    }\n\n    /// Changes the dictionary and compression level used by this compressor.\n    ///\n    /// Will affect future compression jobs.\n    ///\n    /// Note that using a dictionary means that decompression will need to use\n    /// the same dictionary.\n    pub fn set_dictionary(\n        &mut self,\n        level: i32,\n        dictionary: &[u8],\n    ) -> io::Result<()> {\n        self.context\n            .set_parameter(zstd_safe::CParameter::CompressionLevel(level))\n            .map_err(map_error_code)?;\n\n        self.context\n            .load_dictionary(dictionary)\n            .map_err(map_error_code)?;\n\n        Ok(())\n    }\n\n    /// Changes the dictionary used by this compressor.\n    ///\n    /// The compression level used when preparing the dictionary will be used.\n    ///\n    /// Note that using a dictionary means that decompression will need to use\n    /// the same dictionary.\n    pub fn set_prepared_dictionary<'b>(\n        &mut self,\n        dictionary: &'a crate::dict::EncoderDictionary<'b>,\n    ) -> io::Result<()>\n    where\n        'b: 'a,\n    {\n        self.context\n            .ref_cdict(dictionary.as_cdict())\n            .map_err(map_error_code)?;\n\n        Ok(())\n    }\n\n    /// Compress a single block of data to the given destination buffer.\n    ///\n    /// Returns the number of bytes written, or an error if something happened\n    /// (for instance if the destination buffer was too small).\n    ///\n    /// A level of `0` uses zstd's default (currently `3`).\n    pub fn compress_to_buffer<C: zstd_safe::WriteBuf + ?Sized>(\n        &mut self,\n        source: &[u8],\n        destination: &mut 
C,\n    ) -> io::Result<usize> {\n        self.context\n            .compress2(destination, source)\n            .map_err(map_error_code)\n    }\n\n    /// Compresses a block of data and returns the compressed result.\n    ///\n    /// A level of `0` uses zstd's default (currently `3`).\n    pub fn compress(&mut self, data: &[u8]) -> io::Result<Vec<u8>> {\n        // We allocate a big buffer, slightly larger than the input data.\n        let buffer_len = zstd_safe::compress_bound(data.len());\n        let mut buffer = Vec::with_capacity(buffer_len);\n\n        self.compress_to_buffer(data, &mut buffer)?;\n\n        // Should we shrink the vec? Meh, let the user do it if he wants.\n        Ok(buffer)\n    }\n\n    /// Gives mutable access to the internal context.\n    pub fn context_mut(&mut self) -> &mut zstd_safe::CCtx<'a> {\n        &mut self.context\n    }\n\n    /// Sets a compression parameter for this compressor.\n    pub fn set_parameter(\n        &mut self,\n        parameter: zstd_safe::CParameter,\n    ) -> io::Result<()> {\n        self.context\n            .set_parameter(parameter)\n            .map_err(map_error_code)?;\n        Ok(())\n    }\n\n    /// Sets the expected size of the input.\n    ///\n    /// This affects the compression effectiveness.\n    ///\n    /// It is an error to give an incorrect size (an error will be returned when closing the\n    /// stream if the size does not match what was pledged).\n    ///\n    /// Giving a `None` size means the size is unknown (this is the default).\n    pub fn set_pledged_src_size(\n        &mut self,\n        size: Option<u64>,\n    ) -> io::Result<()> {\n        self.context\n            .set_pledged_src_size(size)\n            .map_err(map_error_code)?;\n        Ok(())\n    }\n\n    crate::encoder_parameters!();\n}\n\nfn _assert_traits() {\n    fn _assert_send<T: Send>(_: T) {}\n\n    _assert_send(Compressor::new(0));\n}\n"
  },
  {
    "path": "src/bulk/decompressor.rs",
    "content": "use crate::map_error_code;\n\n#[cfg(feature = \"experimental\")]\nuse std::convert::TryInto;\nuse std::io;\nuse zstd_safe;\n\n/// Allows to decompress independently multiple blocks of data.\n///\n/// This reduces memory usage compared to calling `decompress` multiple times.\n#[derive(Default)]\npub struct Decompressor<'a> {\n    context: zstd_safe::DCtx<'a>,\n}\n\nimpl Decompressor<'static> {\n    /// Creates a new zstd decompressor.\n    pub fn new() -> io::Result<Self> {\n        Self::with_dictionary(&[])\n    }\n\n    /// Creates a new zstd decompressor, using the given dictionary.\n    pub fn with_dictionary(dictionary: &[u8]) -> io::Result<Self> {\n        let mut decompressor = Self::default();\n\n        decompressor.set_dictionary(dictionary)?;\n\n        Ok(decompressor)\n    }\n}\n\nimpl<'a> Decompressor<'a> {\n    /// Creates a new decompressor using an existing `DecoderDictionary`.\n    ///\n    /// Note that using a dictionary means that compression will need to use\n    /// the same dictionary.\n    pub fn with_prepared_dictionary<'b>(\n        dictionary: &'a crate::dict::DecoderDictionary<'b>,\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let mut decompressor = Self::default();\n\n        decompressor.set_prepared_dictionary(dictionary)?;\n\n        Ok(decompressor)\n    }\n\n    /// Changes the dictionary used by this decompressor.\n    ///\n    /// Will affect future compression jobs.\n    ///\n    /// Note that using a dictionary means that compression will need to use\n    /// the same dictionary.\n    pub fn set_dictionary(&mut self, dictionary: &[u8]) -> io::Result<()> {\n        self.context\n            .load_dictionary(dictionary)\n            .map_err(map_error_code)?;\n\n        Ok(())\n    }\n\n    /// Changes the dictionary used by this decompressor.\n    ///\n    /// Note that using a dictionary means that compression will need to use\n    /// the same dictionary.\n    pub fn 
set_prepared_dictionary<'b>(\n        &mut self,\n        dictionary: &'a crate::dict::DecoderDictionary<'b>,\n    ) -> io::Result<()>\n    where\n        'b: 'a,\n    {\n        self.context\n            .ref_ddict(dictionary.as_ddict())\n            .map_err(map_error_code)?;\n\n        Ok(())\n    }\n\n    /// Deompress a single block of data to the given destination buffer.\n    ///\n    /// Returns the number of bytes written, or an error if something happened\n    /// (for instance if the destination buffer was too small).\n    pub fn decompress_to_buffer<C: zstd_safe::WriteBuf + ?Sized>(\n        &mut self,\n        source: &[u8],\n        destination: &mut C,\n    ) -> io::Result<usize> {\n        self.context\n            .decompress(destination, source)\n            .map_err(map_error_code)\n    }\n\n    /// Decompress a block of data, and return the result in a `Vec<u8>`.\n    ///\n    /// The decompressed data should be at most `capacity` bytes,\n    /// or an error will be returned.\n    pub fn decompress(\n        &mut self,\n        data: &[u8],\n        capacity: usize,\n    ) -> io::Result<Vec<u8>> {\n        let capacity =\n            Self::upper_bound(data).unwrap_or(capacity).min(capacity);\n        let mut buffer = Vec::with_capacity(capacity);\n        self.decompress_to_buffer(data, &mut buffer)?;\n        Ok(buffer)\n    }\n\n    /// Sets a decompression parameter for this decompressor.\n    pub fn set_parameter(\n        &mut self,\n        parameter: zstd_safe::DParameter,\n    ) -> io::Result<()> {\n        self.context\n            .set_parameter(parameter)\n            .map_err(map_error_code)?;\n        Ok(())\n    }\n\n    crate::decoder_parameters!();\n\n    /// Get an upper bound on the decompressed size of data, if available\n    ///\n    /// This can be used to pre-allocate enough capacity for `decompress_to_buffer`\n    /// and is used by `decompress` to ensure that it does not over-allocate if\n    /// you supply a large 
`capacity`.\n    ///\n    /// Will return `None` if the upper bound cannot be determined or is larger than `usize::MAX`\n    ///\n    /// Note that unless the `experimental` feature is enabled, this will always return `None`.\n    pub fn upper_bound(_data: &[u8]) -> Option<usize> {\n        #[cfg(feature = \"experimental\")]\n        {\n            let bound = zstd_safe::decompress_bound(_data).ok()?;\n            bound.try_into().ok()\n        }\n        #[cfg(not(feature = \"experimental\"))]\n        {\n            None\n        }\n    }\n}\n\nfn _assert_traits() {\n    fn _assert_send<T: Send>(_: T) {}\n\n    _assert_send(Decompressor::new());\n}\n"
  },
  {
    "path": "src/bulk/mod.rs",
    "content": "//! Compress and decompress data in bulk.\n//!\n//! These methods process all the input data at once.\n//! It is therefore best used with relatively small blocks\n//! (like small network packets).\n\nmod compressor;\nmod decompressor;\n\n#[cfg(test)]\nmod tests;\n\npub use self::compressor::Compressor;\npub use self::decompressor::Decompressor;\n\nuse std::io;\n\n/// Compresses a single block of data to the given destination buffer.\n///\n/// Returns the number of bytes written, or an error if something happened\n/// (for instance if the destination buffer was too small).\n///\n/// A level of `0` uses zstd's default (currently `3`).\npub fn compress_to_buffer(\n    source: &[u8],\n    destination: &mut [u8],\n    level: i32,\n) -> io::Result<usize> {\n    Compressor::new(level)?.compress_to_buffer(source, destination)\n}\n\n/// Compresses a block of data and returns the compressed result.\n///\n/// A level of `0` uses zstd's default (currently `3`).\npub fn compress(data: &[u8], level: i32) -> io::Result<Vec<u8>> {\n    Compressor::new(level)?.compress(data)\n}\n\n/// Deompress a single block of data to the given destination buffer.\n///\n/// Returns the number of bytes written, or an error if something happened\n/// (for instance if the destination buffer was too small).\npub fn decompress_to_buffer(\n    source: &[u8],\n    destination: &mut [u8],\n) -> io::Result<usize> {\n    Decompressor::new()?.decompress_to_buffer(source, destination)\n}\n\n/// Decompresses a block of data and returns the decompressed result.\n///\n/// The decompressed data should be at most `capacity` bytes,\n/// or an error will be returned.\npub fn decompress(data: &[u8], capacity: usize) -> io::Result<Vec<u8>> {\n    Decompressor::new()?.decompress(data, capacity)\n}\n"
  },
  {
    "path": "src/bulk/tests.rs",
    "content": "use super::{compress, decompress};\n\nconst TEXT: &str = include_str!(\"../../assets/example.txt\");\n\n#[test]\nfn test_direct() {\n    // Can we include_str!(\"assets/example.txt\")?\n    // It's excluded from the packaging step, so maybe not.\n    crate::test_cycle_unwrap(\n        TEXT.as_bytes(),\n        |data| compress(data, 1),\n        |data| decompress(data, TEXT.len()),\n    );\n}\n\n#[test]\nfn test_stream_compat() {\n    // We can bulk-compress and stream-decode\n    crate::test_cycle_unwrap(\n        TEXT.as_bytes(),\n        |data| compress(data, 1),\n        |data| crate::decode_all(data),\n    );\n\n    // We can stream-encode and bulk-decompress\n    crate::test_cycle_unwrap(\n        TEXT.as_bytes(),\n        |data| crate::encode_all(data, 1),\n        |data| decompress(data, TEXT.len()),\n    );\n}\n\n#[test]\nfn has_content_size() {\n    let compressed = compress(TEXT.as_bytes(), 1).unwrap();\n\n    // Bulk functions by default include the content size.\n    assert_eq!(\n        zstd_safe::get_frame_content_size(&compressed).unwrap(),\n        Some(TEXT.len() as u64)\n    );\n}\n"
  },
  {
    "path": "src/dict.rs",
    "content": "//! Train a dictionary from various sources.\n//!\n//! A dictionary can help improve the compression of small files.\n//! The dictionary must be present during decompression,\n//! but can be shared across multiple \"similar\" files.\n//!\n//! Creating a dictionary using the `zstd` C library,\n//! using the `zstd` command-line interface, using this library,\n//! or using the `train` binary provided, should give the same result,\n//! and are therefore completely compatible.\n//!\n//! To use, see [`Encoder::with_dictionary`] or [`Decoder::with_dictionary`].\n//!\n//! [`Encoder::with_dictionary`]: ../struct.Encoder.html#method.with_dictionary\n//! [`Decoder::with_dictionary`]: ../struct.Decoder.html#method.with_dictionary\n\n#[cfg(feature = \"zdict_builder\")]\nuse std::io::{self, Read};\n\npub use zstd_safe::{CDict, DDict};\n\n/// Prepared dictionary for compression\n///\n/// A dictionary can include its own copy of the data (if it is `'static`), or it can merely point\n/// to a separate buffer (if it has another lifetime).\npub struct EncoderDictionary<'a> {\n    cdict: CDict<'a>,\n}\n\nimpl EncoderDictionary<'static> {\n    /// Creates a prepared dictionary for compression.\n    ///\n    /// This will copy the dictionary internally.\n    pub fn copy(dictionary: &[u8], level: i32) -> Self {\n        Self {\n            cdict: zstd_safe::create_cdict(dictionary, level),\n        }\n    }\n}\n\nimpl<'a> EncoderDictionary<'a> {\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    /// Create prepared dictionary for compression\n    ///\n    /// A level of `0` uses zstd's default (currently `3`).\n    ///\n    /// Only available with the `experimental` feature. 
Use `EncoderDictionary::copy` otherwise.\n    pub fn new(dictionary: &'a [u8], level: i32) -> Self {\n        Self {\n            cdict: zstd_safe::CDict::create_by_reference(dictionary, level),\n        }\n    }\n\n    /// Returns reference to `CDict` inner object\n    pub fn as_cdict(&self) -> &CDict<'a> {\n        &self.cdict\n    }\n}\n\n/// Prepared dictionary for decompression\npub struct DecoderDictionary<'a> {\n    ddict: DDict<'a>,\n}\n\nimpl DecoderDictionary<'static> {\n    /// Create a prepared dictionary for decompression.\n    ///\n    /// This will copy the dictionary internally.\n    pub fn copy(dictionary: &[u8]) -> Self {\n        Self {\n            ddict: zstd_safe::DDict::create(dictionary),\n        }\n    }\n}\n\nimpl<'a> DecoderDictionary<'a> {\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    /// Create prepared dictionary for decompression\n    ///\n    /// Only available with the `experimental` feature. Use `DecoderDictionary::copy` otherwise.\n    pub fn new(dict: &'a [u8]) -> Self {\n        Self {\n            ddict: zstd_safe::DDict::create_by_reference(dict),\n        }\n    }\n\n    /// Returns reference to `DDict` inner object\n    pub fn as_ddict(&self) -> &DDict<'a> {\n        &self.ddict\n    }\n}\n\n/// Train a dictionary from a big continuous chunk of data, with all samples\n/// contiguous in memory.\n///\n/// This is the most efficient way to train a dictionary,\n/// since this is directly fed into `zstd`.\n///\n/// * `sample_data` is the concatenation of all sample data.\n/// * `sample_sizes` is the size of each sample in `sample_data`.\n///     The sum of all `sample_sizes` should equal the length of `sample_data`.\n/// * `max_size` is the maximum size of the dictionary to generate.\n///\n/// The result is the dictionary data. 
You can, for example, feed it to [`CDict::create`].\n#[cfg(feature = \"zdict_builder\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"zdict_builder\")))]\npub fn from_continuous(\n    sample_data: &[u8],\n    sample_sizes: &[usize],\n    max_size: usize,\n) -> io::Result<Vec<u8>> {\n    use crate::map_error_code;\n\n    // Complain if the lengths don't add up to the entire data.\n    if sample_sizes.iter().sum::<usize>() != sample_data.len() {\n        return Err(io::Error::new(\n            io::ErrorKind::Other,\n            \"sample sizes don't add up\".to_string(),\n        ));\n    }\n\n    let mut result = Vec::with_capacity(max_size);\n    zstd_safe::train_from_buffer(&mut result, sample_data, sample_sizes)\n        .map_err(map_error_code)?;\n    Ok(result)\n}\n\n/// Train a dictionary from multiple samples.\n///\n/// The samples will internally be copied to a single continuous buffer,\n/// so make sure you have enough memory available.\n///\n/// If you need to stretch your system's limits,\n/// [`from_continuous`] directly uses the given slice.\n///\n/// [`from_continuous`]: ./fn.from_continuous.html\n///\n/// * `samples` is a list of individual samples to train on.\n/// * `max_size` is the maximum size of the dictionary to generate.\n///\n/// The result is the dictionary data. 
You can, for example, feed it to [`CDict::create`].\n#[cfg(feature = \"zdict_builder\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"zdict_builder\")))]\npub fn from_samples<S: AsRef<[u8]>>(\n    samples: &[S],\n    max_size: usize,\n) -> io::Result<Vec<u8>> {\n    // Pre-allocate the entire required size.\n    let total_length: usize =\n        samples.iter().map(|sample| sample.as_ref().len()).sum();\n\n    let mut data = Vec::with_capacity(total_length);\n\n    // Copy every sample to a big chunk of memory\n    data.extend(samples.iter().flat_map(|s| s.as_ref()).cloned());\n\n    let sizes: Vec<_> = samples.iter().map(|s| s.as_ref().len()).collect();\n\n    from_continuous(&data, &sizes, max_size)\n}\n\n/// Train a dictionary from multiple samples.\n///\n/// Unlike [`from_samples`], this does not require having a list of all samples.\n/// It also allows running into an error when iterating through the samples.\n///\n/// They will still be copied to a continuous array and fed to [`from_continuous`].\n///\n/// * `samples` is an iterator of individual samples to train on.\n/// * `max_size` is the maximum size of the dictionary to generate.\n///\n/// The result is the dictionary data. 
You can, for example, feed it to [`CDict::create`].\n///\n/// # Examples\n///\n/// ```rust,no_run\n/// // Train from a couple of json files.\n/// let dict_buffer = zstd::dict::from_sample_iterator(\n///     [\"file_a.json\", \"file_b.json\"]\n///         .into_iter()\n///         .map(|filename| std::fs::File::open(filename)),\n///     10_000,  // 10kB dictionary\n/// ).unwrap();\n/// ```\n///\n/// ```rust,no_run\n/// use std::io::BufRead as _;\n/// // Treat each line from stdin as a separate sample.\n/// let dict_buffer = zstd::dict::from_sample_iterator(\n///     std::io::stdin().lock().lines().map(|line: std::io::Result<String>| {\n///         // Transform each line into a `Cursor<Vec<u8>>` so they implement Read.\n///         line.map(String::into_bytes)\n///             .map(std::io::Cursor::new)\n///     }),\n///     10_000,  // 10kB dictionary\n/// ).unwrap();\n/// ```\n#[cfg(feature = \"zdict_builder\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"zdict_builder\")))]\npub fn from_sample_iterator<I, R>(\n    samples: I,\n    max_size: usize,\n) -> io::Result<Vec<u8>>\nwhere\n    I: IntoIterator<Item = io::Result<R>>,\n    R: Read,\n{\n    let mut data = Vec::new();\n    let mut sizes = Vec::new();\n\n    for sample in samples {\n        let mut sample = sample?;\n        let len = sample.read_to_end(&mut data)?;\n        sizes.push(len);\n    }\n\n    from_continuous(&data, &sizes, max_size)\n}\n\n/// Train a dict from a list of files.\n///\n/// * `filenames` is an iterator of files to load. Each file will be treated as an individual\n///     sample.\n/// * `max_size` is the maximum size of the dictionary to generate.\n///\n/// The result is the dictionary data. 
You can, for example, feed it to [`CDict::create`].\n#[cfg(feature = \"zdict_builder\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"zdict_builder\")))]\npub fn from_files<I, P>(filenames: I, max_size: usize) -> io::Result<Vec<u8>>\nwhere\n    P: AsRef<std::path::Path>,\n    I: IntoIterator<Item = P>,\n{\n    from_sample_iterator(\n        filenames\n            .into_iter()\n            .map(|filename| std::fs::File::open(filename)),\n        max_size,\n    )\n}\n\n#[cfg(test)]\n#[cfg(feature = \"zdict_builder\")]\nmod tests {\n    use std::fs;\n    use std::io;\n    use std::io::Read;\n\n    use walkdir;\n\n    #[test]\n    fn test_dict_training() {\n        // Train a dictionary\n        let paths: Vec<_> = walkdir::WalkDir::new(\"src\")\n            .into_iter()\n            .map(|entry| entry.unwrap())\n            .map(|entry| entry.into_path())\n            .filter(|path| path.to_str().unwrap().ends_with(\".rs\"))\n            .collect();\n\n        let dict = super::from_files(&paths, 4000).unwrap();\n\n        for path in paths {\n            let mut buffer = Vec::new();\n            let mut file = fs::File::open(path).unwrap();\n            let mut content = Vec::new();\n            file.read_to_end(&mut content).unwrap();\n            io::copy(\n                &mut &content[..],\n                &mut crate::stream::Encoder::with_dictionary(\n                    &mut buffer,\n                    1,\n                    &dict,\n                )\n                .unwrap()\n                .auto_finish(),\n            )\n            .unwrap();\n\n            let mut result = Vec::new();\n            io::copy(\n                &mut crate::stream::Decoder::with_dictionary(\n                    &buffer[..],\n                    &dict[..],\n                )\n                .unwrap(),\n                &mut result,\n            )\n            .unwrap();\n\n            assert_eq!(&content, &result);\n        }\n    }\n}\n"
  },
  {
    "path": "src/lib.rs",
    "content": "//! Rust binding to the [zstd library][zstd].\n//!\n//! This crate provides:\n//!\n//! * An [encoder](stream/write/struct.Encoder.html) to compress data using zstd\n//!   and send the output to another write.\n//! * A [decoder](stream/read/struct.Decoder.html) to read input data from a `Read`\n//!   and decompress it.\n//! * Convenient functions for common tasks.\n//!\n//! # Example\n//!\n//! ```no_run\n//! use std::io;\n//!\n//! // Uncompress input and print the result.\n//! zstd::stream::copy_decode(io::stdin(), io::stdout()).unwrap();\n//! ```\n//!\n//! [zstd]: https://github.com/facebook/zstd\n#![deny(missing_docs)]\n#![cfg_attr(feature = \"doc-cfg\", feature(doc_cfg))]\n\n// Re-export the zstd-safe crate.\npub use zstd_safe;\n\npub mod bulk;\npub mod dict;\n\n#[macro_use]\npub mod stream;\n\nuse std::io;\n\n/// Default compression level.\npub use zstd_safe::CLEVEL_DEFAULT as DEFAULT_COMPRESSION_LEVEL;\n\n/// The accepted range of compression levels.\npub fn compression_level_range(\n) -> std::ops::RangeInclusive<zstd_safe::CompressionLevel> {\n    zstd_safe::min_c_level()..=zstd_safe::max_c_level()\n}\n\n#[doc(no_inline)]\npub use crate::stream::{decode_all, encode_all, Decoder, Encoder};\n\n/// Returns the error message as io::Error based on error_code.\nfn map_error_code(code: usize) -> io::Error {\n    let msg = zstd_safe::get_error_name(code);\n    io::Error::new(io::ErrorKind::Other, msg.to_string())\n}\n\n// Some helper functions to write full-cycle tests.\n\n#[cfg(test)]\nfn test_cycle<F, G>(data: &[u8], f: F, g: G)\nwhere\n    F: Fn(&[u8]) -> Vec<u8>,\n    G: Fn(&[u8]) -> Vec<u8>,\n{\n    let mid = f(data);\n    let end = g(&mid);\n    assert_eq!(data, &end[..]);\n}\n\n#[cfg(test)]\nfn test_cycle_unwrap<F, G>(data: &[u8], f: F, g: G)\nwhere\n    F: Fn(&[u8]) -> io::Result<Vec<u8>>,\n    G: Fn(&[u8]) -> io::Result<Vec<u8>>,\n{\n    test_cycle(data, |data| f(data).unwrap(), |data| g(data).unwrap())\n}\n\n#[test]\nfn 
default_compression_level_in_range() {\n    assert!(compression_level_range().contains(&DEFAULT_COMPRESSION_LEVEL));\n}\n"
  },
  {
    "path": "src/stream/functions.rs",
    "content": "use std::io;\n\nuse super::{Decoder, Encoder};\n\n/// Decompress from the given source as if using a `Decoder`.\n///\n/// The input data must be in the zstd frame format.\npub fn decode_all<R: io::Read>(source: R) -> io::Result<Vec<u8>> {\n    let mut result = Vec::new();\n    copy_decode(source, &mut result)?;\n    Ok(result)\n}\n\n/// Decompress from the given source as if using a `Decoder`.\n///\n/// Decompressed data will be appended to `destination`.\npub fn copy_decode<R, W>(source: R, mut destination: W) -> io::Result<()>\nwhere\n    R: io::Read,\n    W: io::Write,\n{\n    let mut decoder = Decoder::new(source)?;\n    io::copy(&mut decoder, &mut destination)?;\n    Ok(())\n}\n\n/// Compress all data from the given source as if using an `Encoder`.\n///\n/// Result will be in the zstd frame format.\n///\n/// A level of `0` uses zstd's default (currently `3`).\npub fn encode_all<R: io::Read>(source: R, level: i32) -> io::Result<Vec<u8>> {\n    let mut result = Vec::<u8>::new();\n    copy_encode(source, &mut result, level)?;\n    Ok(result)\n}\n\n/// Compress all data from the given source as if using an `Encoder`.\n///\n/// Compressed data will be appended to `destination`.\n///\n/// A level of `0` uses zstd's default (currently `3`).\npub fn copy_encode<R, W>(\n    mut source: R,\n    destination: W,\n    level: i32,\n) -> io::Result<()>\nwhere\n    R: io::Read,\n    W: io::Write,\n{\n    let mut encoder = Encoder::new(destination, level)?;\n    io::copy(&mut source, &mut encoder)?;\n    encoder.finish()?;\n    Ok(())\n}\n\n#[cfg(test)]\nmod tests {}\n"
  },
  {
    "path": "src/stream/mod.rs",
    "content": "//! Compress and decompress Zstd streams.\n//!\n//! Zstd streams are the main way to compress and decompress data.\n//! They are compatible with the `zstd` command-line tool.\n//!\n//! This module provides both `Read` and `Write` interfaces to compressing and\n//! decompressing.\n\npub mod read;\npub mod write;\n\nmod functions;\npub mod zio;\n\n#[cfg(test)]\nmod tests;\n\npub mod raw;\n\npub use self::functions::{copy_decode, copy_encode, decode_all, encode_all};\npub use self::read::Decoder;\npub use self::write::{AutoFinishEncoder, Encoder};\n\n#[doc(hidden)]\n#[macro_export]\n/// Common functions for the decoder, both in read and write mode.\nmacro_rules! decoder_parameters {\n    () => {\n        /// Sets the maximum back-reference distance.\n        ///\n        /// The actual maximum distance is going to be `2^log_distance`.\n        ///\n        /// This will need to at least match the value set when compressing.\n        pub fn window_log_max(&mut self, log_distance: u32) -> io::Result<()> {\n            self.set_parameter(zstd_safe::DParameter::WindowLogMax(\n                log_distance,\n            ))\n        }\n\n        #[cfg(feature = \"experimental\")]\n        #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n        /// Enables or disabled expecting the 4-byte magic header\n        ///\n        /// Only available with the `experimental` feature.\n        ///\n        /// This will need to match the settings used when compressing.\n        pub fn include_magicbytes(\n            &mut self,\n            include_magicbytes: bool,\n        ) -> io::Result<()> {\n            self.set_parameter(zstd_safe::DParameter::Format(\n                if include_magicbytes {\n                    zstd_safe::FrameFormat::One\n                } else {\n                    zstd_safe::FrameFormat::Magicless\n                },\n            ))\n        }\n    };\n}\n\n#[doc(hidden)]\n#[macro_export]\n/// Common functions for 
the decoder, both in read and write mode.\nmacro_rules! decoder_common {\n    ($readwrite:ident) => {\n        /// Sets a decompression parameter on the decompression stream.\n        pub fn set_parameter(\n            &mut self,\n            parameter: zstd_safe::DParameter,\n        ) -> io::Result<()> {\n            self.$readwrite.operation_mut().set_parameter(parameter)\n        }\n\n        $crate::decoder_parameters!();\n    };\n}\n\n#[doc(hidden)]\n#[macro_export]\n/// Parameter-setters for the encoder. Relies on a `set_parameter` method.\nmacro_rules! encoder_parameters {\n    () => {\n        /// Controls whether zstd should include a content checksum at the end\n        /// of each frame.\n        pub fn include_checksum(\n            &mut self,\n            include_checksum: bool,\n        ) -> io::Result<()> {\n            self.set_parameter(zstd_safe::CParameter::ChecksumFlag(\n                include_checksum,\n            ))\n        }\n\n        /// Enables multithreaded compression\n        ///\n        /// * If `n_workers == 0` (default), then multithreaded will be\n        ///   disabled.\n        /// * If `n_workers >= 1`, then compression will be done in separate\n        ///   threads.\n        ///\n        /// So even `n_workers = 1` may increase performance by separating\n        /// IO and compression.\n        ///\n        /// Note: This is only available if the `zstdmt` cargo feature is activated.\n        #[cfg(feature = \"zstdmt\")]\n        #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"zstdmt\")))]\n        pub fn multithread(&mut self, n_workers: u32) -> io::Result<()> {\n            self.set_parameter(zstd_safe::CParameter::NbWorkers(n_workers))\n        }\n\n        /// Enables or disables storing of the dict id.\n        ///\n        /// Defaults to true. 
If false, the behaviour of decoding with a wrong\n        /// dictionary is undefined.\n        pub fn include_dictid(\n            &mut self,\n            include_dictid: bool,\n        ) -> io::Result<()> {\n            self.set_parameter(zstd_safe::CParameter::DictIdFlag(\n                include_dictid,\n            ))\n        }\n\n        /// Enables or disables storing of the contentsize.\n        ///\n        /// Note that this only has an effect if the size is given with `set_pledged_src_size`.\n        pub fn include_contentsize(\n            &mut self,\n            include_contentsize: bool,\n        ) -> io::Result<()> {\n            self.set_parameter(zstd_safe::CParameter::ContentSizeFlag(\n                include_contentsize,\n            ))\n        }\n        /// Enables or disables long-distance matching\n        pub fn long_distance_matching(\n            &mut self,\n            long_distance_matching: bool,\n        ) -> io::Result<()> {\n            self.set_parameter(\n                zstd_safe::CParameter::EnableLongDistanceMatching(\n                    long_distance_matching,\n                ),\n            )\n        }\n\n        /// Sets the target size for compressed blocks.\n        ///\n        /// A lower block size may result in slightly lower speed (~2%) and compression ratio\n        /// (~0.1%), but may decrease end-to-end latency in low-bandwidth environments (time to\n        /// first decompressed byte).\n        ///\n        /// No value, or a value of zero, results in no constraint for the block sizes.\n        pub fn set_target_cblock_size(\n            &mut self,\n            target_size: Option<u32>,\n        ) -> io::Result<()> {\n            self.set_parameter(zstd_safe::CParameter::TargetCBlockSize(\n                target_size.unwrap_or(0),\n            ))\n        }\n\n        /// Sets the maximum back-reference distance.\n        ///\n        /// The actual maximum distance is going to be `2^log_distance`.\n        
///\n        /// Note that decompression will need to use at least the same setting.\n        pub fn window_log(&mut self, log_distance: u32) -> io::Result<()> {\n            self.set_parameter(zstd_safe::CParameter::WindowLog(log_distance))\n        }\n\n        #[cfg(feature = \"experimental\")]\n        #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n        /// Enables or disables the magic bytes at the beginning of each frame.\n        ///\n        /// If disabled, include_magicbytes must also be called on the decoder.\n        ///\n        /// Only available with the `experimental` feature.\n        ///\n        /// Note that decompression will need to use the same setting.\n        pub fn include_magicbytes(\n            &mut self,\n            include_magicbytes: bool,\n        ) -> io::Result<()> {\n            self.set_parameter(zstd_safe::CParameter::Format(\n                if include_magicbytes {\n                    zstd_safe::FrameFormat::One\n                } else {\n                    zstd_safe::FrameFormat::Magicless\n                },\n            ))\n        }\n    };\n}\n\n#[doc(hidden)]\n#[macro_export]\n/// Common functions for the encoder, both in read and write mode.\nmacro_rules! 
encoder_common {\n    ($readwrite:ident) => {\n        /// Sets the given zstd compression parameter.\n        pub fn set_parameter(\n            &mut self,\n            parameter: zstd_safe::CParameter,\n        ) -> io::Result<()> {\n            self.$readwrite.operation_mut().set_parameter(parameter)\n        }\n\n        /// Sets the expected size of the input.\n        ///\n        /// This affects the compression effectiveness.\n        ///\n        /// It is an error to give an incorrect size (an error will be returned when closing the\n        /// stream if the size does not match what was pledged).\n        ///\n        /// Giving a `None` size means the size is unknown (this is the default).\n        pub fn set_pledged_src_size(\n            &mut self,\n            size: Option<u64>,\n        ) -> io::Result<()> {\n            self.$readwrite.operation_mut().set_pledged_src_size(size)\n        }\n\n        $crate::encoder_parameters!();\n    };\n}\n"
  },
  {
    "path": "src/stream/raw.rs",
    "content": "//! Raw in-memory stream compression/decompression.\n//!\n//! This module defines a `Decoder` and an `Encoder` to decode/encode streams\n//! of data using buffers.\n//!\n//! They are mostly thin wrappers around `zstd_safe::{DCtx, CCtx}`.\nuse std::io;\n\npub use zstd_safe::{CParameter, DParameter, InBuffer, OutBuffer, WriteBuf};\n\nuse crate::dict::{DecoderDictionary, EncoderDictionary};\nuse crate::map_error_code;\n\n/// Represents an abstract compression/decompression operation.\n///\n/// This trait covers both `Encoder` and `Decoder`.\npub trait Operation {\n    /// Performs a single step of this operation.\n    ///\n    /// Should return a hint for the next input size.\n    ///\n    /// If the result is `Ok(0)`, it may indicate that a frame was just\n    /// finished.\n    fn run<C: WriteBuf + ?Sized>(\n        &mut self,\n        input: &mut InBuffer<'_>,\n        output: &mut OutBuffer<'_, C>,\n    ) -> io::Result<usize>;\n\n    /// Performs a single step of this operation.\n    ///\n    /// This is a comvenience wrapper around `Operation::run` if you don't\n    /// want to deal with `InBuffer`/`OutBuffer`.\n    fn run_on_buffers(\n        &mut self,\n        input: &[u8],\n        output: &mut [u8],\n    ) -> io::Result<Status> {\n        let mut input = InBuffer::around(input);\n        let mut output = OutBuffer::around(output);\n\n        let remaining = self.run(&mut input, &mut output)?;\n\n        Ok(Status {\n            remaining,\n            bytes_read: input.pos(),\n            bytes_written: output.pos(),\n        })\n    }\n\n    /// Flushes any internal buffer, if any.\n    ///\n    /// Returns the number of bytes still in the buffer.\n    /// To flush entirely, keep calling until it returns `Ok(0)`.\n    fn flush<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n    ) -> io::Result<usize> {\n        let _ = output;\n        Ok(0)\n    }\n\n    /// Prepares the operation for a new frame.\n    
///\n    /// This is hopefully cheaper than creating a new operation.\n    fn reinit(&mut self) -> io::Result<()> {\n        Ok(())\n    }\n\n    /// Finishes the operation, writing any footer if necessary.\n    ///\n    /// Returns the number of bytes still to write.\n    ///\n    /// Keep calling this method until it returns `Ok(0)`,\n    /// and then don't ever call this method.\n    fn finish<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n        finished_frame: bool,\n    ) -> io::Result<usize> {\n        let _ = output;\n        let _ = finished_frame;\n        Ok(0)\n    }\n}\n\n/// Dummy operation that just copies its input to the output.\npub struct NoOp;\n\nimpl Operation for NoOp {\n    fn run<C: WriteBuf + ?Sized>(\n        &mut self,\n        input: &mut InBuffer<'_>,\n        output: &mut OutBuffer<'_, C>,\n    ) -> io::Result<usize> {\n        // Skip the prelude\n        let src = &input.src[input.pos..];\n        // Safe because `output.pos() <= output.capacity()`.\n        let output_pos = output.pos();\n        let dst = unsafe { output.as_mut_ptr().add(output_pos) };\n\n        // Ignore anything past the end\n        let len = usize::min(src.len(), output.capacity() - output_pos);\n        let src = &src[..len];\n\n        // Safe because:\n        // * `len` is less than either of the two lengths\n        // * `src` and `dst` do not overlap because we have `&mut` to each.\n        unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), dst, len) };\n        input.set_pos(input.pos() + len);\n        unsafe { output.set_pos(output_pos + len) };\n\n        Ok(0)\n    }\n}\n\n/// Describes the result of an operation.\npub struct Status {\n    /// Number of bytes expected for next input.\n    ///\n    /// * If `remaining = 0`, then we are at the end of a frame.\n    /// * If `remaining > 0`, then it's just a hint for how much there is still\n    ///   to read.\n    pub remaining: usize,\n\n    /// Number of bytes 
read from the input.\n    pub bytes_read: usize,\n\n    /// Number of bytes written to the output.\n    pub bytes_written: usize,\n}\n\n/// An in-memory decoder for streams of data.\npub struct Decoder<'a> {\n    context: MaybeOwnedDCtx<'a>,\n}\n\nimpl Decoder<'static> {\n    /// Creates a new decoder.\n    pub fn new() -> io::Result<Self> {\n        Self::with_dictionary(&[])\n    }\n\n    /// Creates a new decoder initialized with the given dictionary.\n    pub fn with_dictionary(dictionary: &[u8]) -> io::Result<Self> {\n        let mut context = zstd_safe::DCtx::create();\n        context.init().map_err(map_error_code)?;\n        context\n            .load_dictionary(dictionary)\n            .map_err(map_error_code)?;\n        Ok(Decoder {\n            context: MaybeOwnedDCtx::Owned(context),\n        })\n    }\n}\n\nimpl<'a> Decoder<'a> {\n    /// Creates a new decoder which employs the provided context for deserialization.\n    pub fn with_context(context: &'a mut zstd_safe::DCtx<'static>) -> Self {\n        Self {\n            context: MaybeOwnedDCtx::Borrowed(context),\n        }\n    }\n\n    /// Creates a new decoder, using an existing `DecoderDictionary`.\n    pub fn with_prepared_dictionary<'b>(\n        dictionary: &DecoderDictionary<'b>,\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let mut context = zstd_safe::DCtx::create();\n        context\n            .ref_ddict(dictionary.as_ddict())\n            .map_err(map_error_code)?;\n        Ok(Decoder {\n            context: MaybeOwnedDCtx::Owned(context),\n        })\n    }\n\n    /// Creates a new decoder, using a ref prefix\n    pub fn with_ref_prefix<'b>(ref_prefix: &'b [u8]) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let mut context = zstd_safe::DCtx::create();\n        context.ref_prefix(ref_prefix).map_err(map_error_code)?;\n        Ok(Decoder {\n            context: MaybeOwnedDCtx::Owned(context),\n        })\n    }\n\n    /// Sets a decompression 
parameter for this decoder.\n    pub fn set_parameter(&mut self, parameter: DParameter) -> io::Result<()> {\n        match &mut self.context {\n            MaybeOwnedDCtx::Owned(x) => x.set_parameter(parameter),\n            MaybeOwnedDCtx::Borrowed(x) => x.set_parameter(parameter),\n        }\n        .map_err(map_error_code)?;\n        Ok(())\n    }\n}\n\nimpl Operation for Decoder<'_> {\n    fn run<C: WriteBuf + ?Sized>(\n        &mut self,\n        input: &mut InBuffer<'_>,\n        output: &mut OutBuffer<'_, C>,\n    ) -> io::Result<usize> {\n        match &mut self.context {\n            MaybeOwnedDCtx::Owned(x) => x.decompress_stream(output, input),\n            MaybeOwnedDCtx::Borrowed(x) => x.decompress_stream(output, input),\n        }\n        .map_err(map_error_code)\n    }\n\n    fn flush<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n    ) -> io::Result<usize> {\n        // To flush, we just offer no additional input.\n        self.run(&mut InBuffer::around(&[]), output)?;\n\n        // We don't _know_ how much (decompressed data) there is still in buffer.\n        if output.pos() < output.capacity() {\n            // We only know when there's none (the output buffer is not full).\n            Ok(0)\n        } else {\n            // Otherwise, pretend there's still \"1 byte\" remaining.\n            Ok(1)\n        }\n    }\n\n    fn reinit(&mut self) -> io::Result<()> {\n        match &mut self.context {\n            MaybeOwnedDCtx::Owned(x) => {\n                x.reset(zstd_safe::ResetDirective::SessionOnly)\n            }\n            MaybeOwnedDCtx::Borrowed(x) => {\n                x.reset(zstd_safe::ResetDirective::SessionOnly)\n            }\n        }\n        .map_err(map_error_code)?;\n        Ok(())\n    }\n\n    fn finish<C: WriteBuf + ?Sized>(\n        &mut self,\n        _output: &mut OutBuffer<'_, C>,\n        finished_frame: bool,\n    ) -> io::Result<usize> {\n        if finished_frame {\n          
  Ok(0)\n        } else {\n            Err(io::Error::new(\n                io::ErrorKind::UnexpectedEof,\n                \"incomplete frame\",\n            ))\n        }\n    }\n}\n\n/// An in-memory encoder for streams of data.\npub struct Encoder<'a> {\n    context: MaybeOwnedCCtx<'a>,\n}\n\nimpl Encoder<'static> {\n    /// Creates a new encoder.\n    pub fn new(level: i32) -> io::Result<Self> {\n        Self::with_dictionary(level, &[])\n    }\n\n    /// Creates a new encoder initialized with the given dictionary.\n    pub fn with_dictionary(level: i32, dictionary: &[u8]) -> io::Result<Self> {\n        let mut context = zstd_safe::CCtx::create();\n\n        context\n            .set_parameter(CParameter::CompressionLevel(level))\n            .map_err(map_error_code)?;\n\n        context\n            .load_dictionary(dictionary)\n            .map_err(map_error_code)?;\n\n        Ok(Encoder {\n            context: MaybeOwnedCCtx::Owned(context),\n        })\n    }\n}\n\nimpl<'a> Encoder<'a> {\n    /// Creates a new encoder that uses the provided context for serialization.\n    pub fn with_context(context: &'a mut zstd_safe::CCtx<'static>) -> Self {\n        Self {\n            context: MaybeOwnedCCtx::Borrowed(context),\n        }\n    }\n\n    /// Creates a new encoder using an existing `EncoderDictionary`.\n    pub fn with_prepared_dictionary<'b>(\n        dictionary: &EncoderDictionary<'b>,\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let mut context = zstd_safe::CCtx::create();\n        context\n            .ref_cdict(dictionary.as_cdict())\n            .map_err(map_error_code)?;\n        Ok(Encoder {\n            context: MaybeOwnedCCtx::Owned(context),\n        })\n    }\n\n    /// Creates a new encoder initialized with the given ref prefix.\n    pub fn with_ref_prefix<'b>(\n        level: i32,\n        ref_prefix: &'b [u8],\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let mut context = 
 zstd_safe::CCtx::create();\n\n        context\n            .set_parameter(CParameter::CompressionLevel(level))\n            .map_err(map_error_code)?;\n\n        context.ref_prefix(ref_prefix).map_err(map_error_code)?;\n\n        Ok(Encoder {\n            context: MaybeOwnedCCtx::Owned(context),\n        })\n    }\n\n    /// Sets a compression parameter for this encoder.\n    pub fn set_parameter(&mut self, parameter: CParameter) -> io::Result<()> {\n        match &mut self.context {\n            MaybeOwnedCCtx::Owned(x) => x.set_parameter(parameter),\n            MaybeOwnedCCtx::Borrowed(x) => x.set_parameter(parameter),\n        }\n        .map_err(map_error_code)?;\n        Ok(())\n    }\n\n    /// Sets the size of the input expected by zstd.\n    ///\n    /// May affect compression ratio.\n    ///\n    /// It is an error to give an incorrect size (an error _will_ be returned when closing the\n    /// stream).\n    ///\n    /// If `None` is given, it assumes the size is not known (default behaviour).\n    pub fn set_pledged_src_size(\n        &mut self,\n        pledged_src_size: Option<u64>,\n    ) -> io::Result<()> {\n        match &mut self.context {\n            MaybeOwnedCCtx::Owned(x) => {\n                x.set_pledged_src_size(pledged_src_size)\n            }\n            MaybeOwnedCCtx::Borrowed(x) => {\n                x.set_pledged_src_size(pledged_src_size)\n            }\n        }\n        .map_err(map_error_code)?;\n        Ok(())\n    }\n}\n\nimpl<'a> Operation for Encoder<'a> {\n    fn run<C: WriteBuf + ?Sized>(\n        &mut self,\n        input: &mut InBuffer<'_>,\n        output: &mut OutBuffer<'_, C>,\n    ) -> io::Result<usize> {\n        match &mut self.context {\n            MaybeOwnedCCtx::Owned(x) => x.compress_stream(output, input),\n            MaybeOwnedCCtx::Borrowed(x) => x.compress_stream(output, input),\n        }\n        .map_err(map_error_code)\n    }\n\n    fn flush<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: 
&mut OutBuffer<'_, C>,\n    ) -> io::Result<usize> {\n        match &mut self.context {\n            MaybeOwnedCCtx::Owned(x) => x.flush_stream(output),\n            MaybeOwnedCCtx::Borrowed(x) => x.flush_stream(output),\n        }\n        .map_err(map_error_code)\n    }\n\n    fn finish<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n        _finished_frame: bool,\n    ) -> io::Result<usize> {\n        match &mut self.context {\n            MaybeOwnedCCtx::Owned(x) => x.end_stream(output),\n            MaybeOwnedCCtx::Borrowed(x) => x.end_stream(output),\n        }\n        .map_err(map_error_code)\n    }\n\n    fn reinit(&mut self) -> io::Result<()> {\n        match &mut self.context {\n            MaybeOwnedCCtx::Owned(x) => {\n                x.reset(zstd_safe::ResetDirective::SessionOnly)\n            }\n            MaybeOwnedCCtx::Borrowed(x) => {\n                x.reset(zstd_safe::ResetDirective::SessionOnly)\n            }\n        }\n        .map_err(map_error_code)?;\n        Ok(())\n    }\n}\n\nenum MaybeOwnedCCtx<'a> {\n    Owned(zstd_safe::CCtx<'a>),\n    Borrowed(&'a mut zstd_safe::CCtx<'static>),\n}\n\nenum MaybeOwnedDCtx<'a> {\n    Owned(zstd_safe::DCtx<'a>),\n    Borrowed(&'a mut zstd_safe::DCtx<'static>),\n}\n\n#[cfg(test)]\nmod tests {\n\n    // This requires impl for [u8; N] which is currently behind a feature.\n    #[cfg(feature = \"arrays\")]\n    #[test]\n    fn test_cycle() {\n        use super::{Decoder, Encoder, InBuffer, Operation, OutBuffer};\n\n        let mut encoder = Encoder::new(1).unwrap();\n        let mut decoder = Decoder::new().unwrap();\n\n        // Step 1: compress\n        let mut input = InBuffer::around(b\"AbcdefAbcdefabcdef\");\n\n        let mut output = [0u8; 128];\n        let mut output = OutBuffer::around(&mut output);\n\n        loop {\n            encoder.run(&mut input, &mut output).unwrap();\n\n            if input.pos == input.src.len() {\n                break;\n           
 }\n        }\n        encoder.finish(&mut output, true).unwrap();\n\n        let initial_data = input.src;\n\n        // Step 2: decompress\n        let mut input = InBuffer::around(output.as_slice());\n        let mut output = [0u8; 128];\n        let mut output = OutBuffer::around(&mut output);\n\n        loop {\n            decoder.run(&mut input, &mut output).unwrap();\n\n            if input.pos == input.src.len() {\n                break;\n            }\n        }\n\n        assert_eq!(initial_data, output.as_slice());\n    }\n}\n"
  },
  {
    "path": "src/stream/read/mod.rs",
    "content": "//! Implement pull-based [`Read`] trait for both compressing and decompressing.\nuse std::io::{self, BufRead, BufReader, Read};\n\nuse crate::dict::{DecoderDictionary, EncoderDictionary};\nuse crate::stream::{raw, zio};\nuse zstd_safe;\n\n#[cfg(test)]\nmod tests;\n\n/// A decoder that decompresses input data from another `Read`.\n///\n/// This allows to read a stream of compressed data\n/// (good for files or heavy network stream).\npub struct Decoder<'a, R> {\n    reader: zio::Reader<R, raw::Decoder<'a>>,\n}\n\n/// An encoder that compresses input data from another `Read`.\npub struct Encoder<'a, R> {\n    reader: zio::Reader<R, raw::Encoder<'a>>,\n}\n\nimpl<R: Read> Decoder<'static, BufReader<R>> {\n    /// Creates a new decoder.\n    pub fn new(reader: R) -> io::Result<Self> {\n        let buffer_size = zstd_safe::DCtx::in_size();\n\n        Self::with_buffer(BufReader::with_capacity(buffer_size, reader))\n    }\n}\n\nimpl<R: BufRead> Decoder<'static, R> {\n    /// Creates a new decoder around a `BufRead`.\n    pub fn with_buffer(reader: R) -> io::Result<Self> {\n        Self::with_dictionary(reader, &[])\n    }\n    /// Creates a new decoder, using an existing dictionary.\n    ///\n    /// The dictionary must be the same as the one used during compression.\n    pub fn with_dictionary(reader: R, dictionary: &[u8]) -> io::Result<Self> {\n        let decoder = raw::Decoder::with_dictionary(dictionary)?;\n        let reader = zio::Reader::new(reader, decoder);\n\n        Ok(Decoder { reader })\n    }\n}\nimpl<'a, R: BufRead> Decoder<'a, R> {\n    /// Creates a new decoder which employs the provided context for deserialization.\n    pub fn with_context(\n        reader: R,\n        context: &'a mut zstd_safe::DCtx<'static>,\n    ) -> Self {\n        Self {\n            reader: zio::Reader::new(\n                reader,\n                raw::Decoder::with_context(context),\n            ),\n        }\n    }\n\n    /// Sets this `Decoder` to stop after the 
first frame.\n    ///\n    /// By default, it keeps concatenating frames until EOF is reached.\n    #[must_use]\n    pub fn single_frame(mut self) -> Self {\n        self.reader.set_single_frame();\n        self\n    }\n\n    /// Creates a new decoder, using an existing `DecoderDictionary`.\n    ///\n    /// The dictionary must be the same as the one used during compression.\n    pub fn with_prepared_dictionary<'b>(\n        reader: R,\n        dictionary: &DecoderDictionary<'b>,\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let decoder = raw::Decoder::with_prepared_dictionary(dictionary)?;\n        let reader = zio::Reader::new(reader, decoder);\n\n        Ok(Decoder { reader })\n    }\n\n    /// Creates a new decoder, using a ref prefix.\n    ///\n    /// The prefix must be the same as the one used during compression.\n    pub fn with_ref_prefix<'b>(\n        reader: R,\n        ref_prefix: &'b [u8],\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let decoder = raw::Decoder::with_ref_prefix(ref_prefix)?;\n        let reader = zio::Reader::new(reader, decoder);\n\n        Ok(Decoder { reader })\n    }\n\n    /// Recommendation for the size of the output buffer.\n    pub fn recommended_output_size() -> usize {\n        zstd_safe::DCtx::out_size()\n    }\n\n    /// Acquire a reference to the underlying reader.\n    pub fn get_ref(&self) -> &R {\n        self.reader.reader()\n    }\n\n    /// Acquire a mutable reference to the underlying reader.\n    ///\n    /// Note that mutation of the reader may result in surprising results if\n    /// this decoder is continued to be used.\n    pub fn get_mut(&mut self) -> &mut R {\n        self.reader.reader_mut()\n    }\n\n    /// Return the inner `Read`.\n    ///\n    /// Calling `finish()` is not *required* after reading a stream -\n    /// just use it if you need to get the `Read` back.\n    pub fn finish(self) -> R {\n        self.reader.into_inner()\n    }\n\n    
crate::decoder_common!(reader);\n}\n\nimpl<R: BufRead> Read for Decoder<'_, R> {\n    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {\n        self.reader.read(buf)\n    }\n}\n\nimpl<R: Read> Encoder<'static, BufReader<R>> {\n    /// Creates a new encoder.\n    pub fn new(reader: R, level: i32) -> io::Result<Self> {\n        let buffer_size = zstd_safe::CCtx::in_size();\n\n        Self::with_buffer(BufReader::with_capacity(buffer_size, reader), level)\n    }\n}\n\nimpl<R: BufRead> Encoder<'static, R> {\n    /// Creates a new encoder around a `BufRead`.\n    pub fn with_buffer(reader: R, level: i32) -> io::Result<Self> {\n        Self::with_dictionary(reader, level, &[])\n    }\n\n    /// Creates a new encoder, using an existing dictionary.\n    ///\n    /// The dictionary must be the same as the one used during compression.\n    pub fn with_dictionary(\n        reader: R,\n        level: i32,\n        dictionary: &[u8],\n    ) -> io::Result<Self> {\n        let encoder = raw::Encoder::with_dictionary(level, dictionary)?;\n        let reader = zio::Reader::new(reader, encoder);\n\n        Ok(Encoder { reader })\n    }\n}\n\nimpl<'a, R: BufRead> Encoder<'a, R> {\n    /// Creates a new encoder which employs the provided context for serialization.\n    pub fn with_context(\n        reader: R,\n        context: &'a mut zstd_safe::CCtx<'static>,\n    ) -> Self {\n        Self {\n            reader: zio::Reader::new(\n                reader,\n                raw::Encoder::with_context(context),\n            ),\n        }\n    }\n\n    /// Creates a new encoder, using an existing `EncoderDictionary`.\n    ///\n    /// The dictionary must be the same as the one used during compression.\n    pub fn with_prepared_dictionary<'b>(\n        reader: R,\n        dictionary: &EncoderDictionary<'b>,\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let encoder = raw::Encoder::with_prepared_dictionary(dictionary)?;\n        let reader = 
zio::Reader::new(reader, encoder);\n\n        Ok(Encoder { reader })\n    }\n\n    /// Recommendation for the size of the output buffer.\n    pub fn recommended_output_size() -> usize {\n        zstd_safe::CCtx::out_size()\n    }\n\n    /// Acquire a reference to the underlying reader.\n    pub fn get_ref(&self) -> &R {\n        self.reader.reader()\n    }\n\n    /// Acquire a mutable reference to the underlying reader.\n    ///\n    /// Note that mutation of the reader may result in surprising results if\n    /// this encoder is continued to be used.\n    pub fn get_mut(&mut self) -> &mut R {\n        self.reader.reader_mut()\n    }\n\n    /// Flush any internal buffer.\n    ///\n    /// This ensures all input consumed so far is compressed.\n    ///\n    /// Since it prevents bundling currently buffered data with future input,\n    /// it may affect compression ratio.\n    ///\n    /// * Returns the number of bytes written to `out`.\n    /// * Returns `Ok(0)` when everything has been flushed.\n    pub fn flush(&mut self, out: &mut [u8]) -> io::Result<usize> {\n        self.reader.flush(out)\n    }\n\n    /// Return the inner `Read`.\n    ///\n    /// Calling `finish()` is not *required* after reading a stream -\n    /// just use it if you need to get the `Read` back.\n    pub fn finish(self) -> R {\n        self.reader.into_inner()\n    }\n\n    crate::encoder_common!(reader);\n}\n\nimpl<R: BufRead> Read for Encoder<'_, R> {\n    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {\n        self.reader.read(buf)\n    }\n}\n\nfn _assert_traits() {\n    use std::io::Cursor;\n\n    fn _assert_send<T: Send>(_: T) {}\n\n    _assert_send(Decoder::new(Cursor::new(Vec::new())));\n    _assert_send(Encoder::new(Cursor::new(Vec::new()), 1));\n}\n"
  },
  {
    "path": "src/stream/read/tests.rs",
    "content": "use crate::stream::read::{Decoder, Encoder};\nuse std::io::Read;\n\n#[test]\nfn test_error_handling() {\n    let invalid_input = b\"Abcdefghabcdefgh\";\n\n    let mut decoder = Decoder::new(&invalid_input[..]).unwrap();\n    let output = decoder.read_to_end(&mut Vec::new());\n\n    assert_eq!(output.is_err(), true);\n}\n\n#[test]\nfn test_cycle() {\n    let input = b\"Abcdefghabcdefgh\";\n\n    let mut encoder = Encoder::new(&input[..], 1).unwrap();\n    let mut buffer = Vec::new();\n    encoder.read_to_end(&mut buffer).unwrap();\n\n    let mut decoder = Decoder::new(&buffer[..]).unwrap();\n    let mut buffer = Vec::new();\n    decoder.read_to_end(&mut buffer).unwrap();\n\n    assert_eq!(input, &buffer[..]);\n}\n"
  },
  {
    "path": "src/stream/tests.rs",
    "content": "use super::{copy_encode, decode_all, encode_all};\nuse super::{Decoder, Encoder};\n\nuse partial_io::{PartialOp, PartialWrite};\n\nuse std::io;\nuse std::iter;\n\n#[test]\nfn test_end_of_frame() {\n    use std::io::{Read, Write};\n\n    let mut enc = Encoder::new(Vec::new(), 1).unwrap();\n    enc.write_all(b\"foo\").unwrap();\n    let mut compressed = enc.finish().unwrap();\n\n    // Add footer/whatever to underlying storage.\n    compressed.push(0);\n\n    // Drain zstd stream until end-of-frame.\n    let mut dec = Decoder::new(&compressed[..]).unwrap().single_frame();\n    let mut buf = Vec::new();\n    dec.read_to_end(&mut buf).unwrap();\n    assert_eq!(&buf, b\"foo\", \"Error decoding a single frame.\");\n}\n\n#[test]\nfn test_concatenated_frames() {\n    let mut buffer = Vec::new();\n    copy_encode(&b\"foo\"[..], &mut buffer, 1).unwrap();\n    copy_encode(&b\"bar\"[..], &mut buffer, 2).unwrap();\n    copy_encode(&b\"baz\"[..], &mut buffer, 3).unwrap();\n\n    assert_eq!(\n        &decode_all(&buffer[..]).unwrap(),\n        b\"foobarbaz\",\n        \"Error decoding concatenated frames.\"\n    );\n}\n\n#[test]\nfn test_flush() {\n    use std::io::Write;\n\n    let buf = Vec::new();\n    let mut z = Encoder::new(buf, 19).unwrap();\n\n    z.write_all(b\"hello\").unwrap();\n\n    z.flush().unwrap(); // Might corrupt stream\n    let buf = z.finish().unwrap();\n\n    let s = decode_all(&buf[..]).unwrap();\n    assert_eq!(s, b\"hello\", \"Error decoding after flush.\");\n}\n\n#[test]\nfn test_try_finish() {\n    use std::io::Write;\n    let mut z = setup_try_finish();\n\n    z.get_mut().set_ops(iter::repeat(PartialOp::Unlimited));\n\n    // flush() should continue to work even though write() doesn't.\n    z.flush().unwrap();\n\n    let buf = match z.try_finish() {\n        Ok(buf) => buf.into_inner(),\n        Err((_z, e)) => panic!(\"try_finish failed with {:?}\", e),\n    };\n\n    // Make sure the multiple try_finish calls didn't screw up the 
internal\n    // buffer and continued to produce valid compressed data.\n    assert_eq!(&decode_all(&buf[..]).unwrap(), b\"hello\", \"Error decoding\");\n}\n\n#[test]\n#[should_panic]\nfn test_write_after_try_finish() {\n    use std::io::Write;\n    let mut z = setup_try_finish();\n    z.write_all(b\"hello world\").unwrap();\n}\n\nfn setup_try_finish() -> Encoder<'static, PartialWrite<Vec<u8>>> {\n    use std::io::Write;\n\n    let buf =\n        PartialWrite::new(Vec::new(), iter::repeat(PartialOp::Unlimited));\n    let mut z = Encoder::new(buf, 19).unwrap();\n\n    z.write_all(b\"hello\").unwrap();\n\n    z.get_mut()\n        .set_ops(iter::repeat(PartialOp::Err(io::ErrorKind::WouldBlock)));\n\n    let (z, err) = z.try_finish().unwrap_err();\n    assert_eq!(\n        err.kind(),\n        io::ErrorKind::WouldBlock,\n        \"expected WouldBlock error\"\n    );\n\n    z\n}\n\n#[test]\nfn test_failing_write() {\n    use std::io::Write;\n\n    let buf = PartialWrite::new(\n        Vec::new(),\n        iter::repeat(PartialOp::Err(io::ErrorKind::WouldBlock)),\n    );\n    let mut z = Encoder::new(buf, 1).unwrap();\n\n    // Fill in enough data to make sure the buffer gets written out.\n    let input = vec![b'b'; 128 * 1024];\n    // This should work even though the inner writer rejects writes.\n    assert_eq!(\n        z.write(&input).unwrap(),\n        128 * 1024,\n        \"did not write all input buffer\"\n    );\n\n    // The next write would fail (the buffer still has some data in it).\n    assert_eq!(\n        z.write(b\"abc\").unwrap_err().kind(),\n        io::ErrorKind::WouldBlock,\n        \"expected WouldBlock error\"\n    );\n\n    z.get_mut().set_ops(iter::repeat(PartialOp::Unlimited));\n\n    // This shouldn't have led to any corruption.\n    let buf = z.finish().unwrap().into_inner();\n    assert_eq!(\n        &decode_all(&buf[..]).unwrap(),\n        &input,\n        \"WouldBlock errors should not corrupt stream\"\n    );\n}\n\n#[test]\nfn 
test_invalid_frame() {\n    use std::io::Read;\n\n    // I really hope this data is invalid...\n    let data = &[1u8, 2u8, 3u8, 4u8, 5u8];\n    let mut dec = Decoder::new(&data[..]).unwrap();\n    assert_eq!(\n        dec.read_to_end(&mut Vec::new()).err().map(|e| e.kind()),\n        Some(io::ErrorKind::Other),\n        \"did not encounter expected 'invalid frame' error\"\n    );\n}\n\n#[test]\nfn test_incomplete_frame() {\n    use std::io::{Read, Write};\n\n    let mut enc = Encoder::new(Vec::new(), 1).unwrap();\n    enc.write_all(b\"This is a regular string\").unwrap();\n    let mut compressed = enc.finish().unwrap();\n\n    let half_size = compressed.len() - 2;\n    compressed.truncate(half_size);\n\n    let mut dec = Decoder::new(&compressed[..]).unwrap();\n    assert_eq!(\n        dec.read_to_end(&mut Vec::new()).err().map(|e| e.kind()),\n        Some(io::ErrorKind::UnexpectedEof),\n        \"did not encounter expected EOF error\"\n    );\n}\n\n#[test]\nfn test_cli_compatibility() {\n    let input = include_bytes!(\"../../assets/example.txt.zst\");\n\n    let output = decode_all(&input[..]).unwrap();\n\n    let expected = include_bytes!(\"../../assets/example.txt\");\n\n    assert_eq!(\n        &output[..],\n        &expected[..],\n        \"error decoding cli-compressed data\"\n    );\n}\n\n#[cfg(feature = \"legacy\")]\n#[test]\nfn test_legacy() {\n    use std::fs;\n    use std::io::Read;\n\n    // Read the content from that file\n    let expected = include_bytes!(\"../../assets/example.txt\");\n\n    for version in &[5, 6, 7, 8] {\n        let filename = format!(\"assets/example.txt.v{}.zst\", version);\n        let file = fs::File::open(filename).unwrap();\n        let mut decoder = Decoder::new(file).unwrap();\n\n        let mut buffer = Vec::new();\n        decoder.read_to_end(&mut buffer).unwrap();\n\n        assert_eq!(\n            &expected[..],\n            &buffer[..],\n            \"error decompressing legacy version {}\",\n            version\n    
    );\n    }\n}\n\n// Check that compressing+decompressing some data gives back the original\nfn test_full_cycle(input: &[u8], level: i32) {\n    crate::test_cycle_unwrap(\n        input,\n        |data| encode_all(data, level),\n        |data| decode_all(data),\n    );\n}\n\n#[test]\nfn test_empty() {\n    // Test compressing empty data\n    for level in 1..19 {\n        test_full_cycle(b\"\", level);\n    }\n}\n\n#[test]\nfn test_ll_source() {\n    // Where could I find some long text?...\n    let data = include_bytes!(\"../../zstd-safe/zstd-sys/src/bindings_zstd.rs\");\n    // Test a few compression levels.\n    // TODO: check them all?\n    for level in 1..5 {\n        // Test compressing actual data\n        test_full_cycle(data, level);\n    }\n}\n\n#[test]\nfn reader_to_writer() {\n    use std::io::{Read, Write};\n\n    let clear = include_bytes!(\"../../assets/example.txt\");\n    // Compress using reader\n    let mut encoder = super::read::Encoder::new(&clear[..], 1).unwrap();\n\n    let mut compressed_buffer = Vec::new();\n    encoder.read_to_end(&mut compressed_buffer).unwrap();\n\n    // eprintln!(\"Compressed Buffer: {:?}\", compressed_buffer);\n\n    // Decompress using writer\n    let mut decompressed_buffer = Vec::new();\n    let mut decoder =\n        super::write::Decoder::new(&mut decompressed_buffer).unwrap();\n    decoder.write_all(&compressed_buffer[..]).unwrap();\n    decoder.flush().unwrap();\n    // eprintln!(\"{:?}\", decompressed_buffer);\n\n    assert_eq!(clear, &decompressed_buffer[..]);\n}\n\n#[test]\nfn test_finish_empty_encoder() {\n    use std::io::Write;\n    let mut enc = Encoder::new(Vec::new(), 0).unwrap();\n    enc.do_finish().unwrap();\n    enc.write_all(b\"this should not work\").unwrap_err();\n    enc.finish().unwrap();\n}\n"
  },
  {
    "path": "src/stream/write/mod.rs",
    "content": "//! Implement push-based [`Write`] trait for both compressing and decompressing.\nuse std::io::{self, Write};\n\nuse zstd_safe;\n\nuse crate::dict::{DecoderDictionary, EncoderDictionary};\nuse crate::stream::{raw, zio};\n\n#[cfg(test)]\nmod tests;\n\n/// An encoder that compresses and forwards data to another writer.\n///\n/// This allows to compress a stream of data\n/// (good for files or heavy network stream).\n///\n/// Don't forget to call [`finish()`] before dropping it!\n///\n/// Alternatively, you can call [`auto_finish()`] to use an\n/// [`AutoFinishEncoder`] that will finish on drop.\n///\n/// Note: The zstd library has its own internal input buffer (~128kb).\n///\n/// [`finish()`]: #method.finish\n/// [`auto_finish()`]: #method.auto_finish\n/// [`AutoFinishEncoder`]: AutoFinishEncoder\npub struct Encoder<'a, W: Write> {\n    // output writer (compressed data)\n    writer: zio::Writer<W, raw::Encoder<'a>>,\n}\n\n/// A decoder that decompresses and forwards data to another writer.\n///\n/// Note that you probably want to `flush()` after writing your stream content.\n/// You can use [`auto_flush()`] to automatically flush the writer on drop.\n///\n/// [`auto_flush()`]: Decoder::auto_flush\npub struct Decoder<'a, W: Write> {\n    // output writer (decompressed data)\n    writer: zio::Writer<W, raw::Decoder<'a>>,\n}\n\n/// A wrapper around an `Encoder<W>` that finishes the stream on drop.\n///\n/// This can be created by the [`auto_finish()`] method on the [`Encoder`].\n///\n/// [`auto_finish()`]: Encoder::auto_finish\n/// [`Encoder`]: Encoder\npub struct AutoFinishEncoder<\n    'a,\n    W: Write,\n    F: FnMut(io::Result<W>) = Box<dyn Send + FnMut(io::Result<W>)>,\n> {\n    // We wrap this in an option to take it during drop.\n    encoder: Option<Encoder<'a, W>>,\n\n    on_finish: Option<F>,\n}\n\n/// A wrapper around a `Decoder<W>` that flushes the stream on drop.\n///\n/// This can be created by the [`auto_flush()`] method on the 
[`Decoder`].\n///\n/// [`auto_flush()`]: Decoder::auto_flush\n/// [`Decoder`]: Decoder\npub struct AutoFlushDecoder<\n    'a,\n    W: Write,\n    F: FnMut(io::Result<()>) = Box<dyn Send + FnMut(io::Result<()>)>,\n> {\n    // We wrap this in an option to take it during drop.\n    decoder: Option<Decoder<'a, W>>,\n\n    on_flush: Option<F>,\n}\n\nimpl<'a, W: Write, F: FnMut(io::Result<()>)> AutoFlushDecoder<'a, W, F> {\n    fn new(decoder: Decoder<'a, W>, on_flush: F) -> Self {\n        AutoFlushDecoder {\n            decoder: Some(decoder),\n            on_flush: Some(on_flush),\n        }\n    }\n\n    /// Acquires a reference to the underlying writer.\n    pub fn get_ref(&self) -> &W {\n        self.decoder.as_ref().unwrap().get_ref()\n    }\n\n    /// Acquires a mutable reference to the underlying writer.\n    ///\n    /// Note that mutation of the writer may result in surprising results if\n    /// this decoder is continued to be used.\n    ///\n    /// Mostly used for testing purposes.\n    pub fn get_mut(&mut self) -> &mut W {\n        self.decoder.as_mut().unwrap().get_mut()\n    }\n}\n\nimpl<W, F> Drop for AutoFlushDecoder<'_, W, F>\nwhere\n    W: Write,\n    F: FnMut(io::Result<()>),\n{\n    fn drop(&mut self) {\n        let mut decoder = self.decoder.take().unwrap();\n        let result = decoder.flush();\n        if let Some(mut on_finish) = self.on_flush.take() {\n            on_finish(result);\n        }\n    }\n}\n\nimpl<W: Write, F: FnMut(io::Result<()>)> Write for AutoFlushDecoder<'_, W, F> {\n    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {\n        self.decoder.as_mut().unwrap().write(buf)\n    }\n\n    fn flush(&mut self) -> io::Result<()> {\n        self.decoder.as_mut().unwrap().flush()\n    }\n}\n\nimpl<'a, W: Write, F: FnMut(io::Result<W>)> AutoFinishEncoder<'a, W, F> {\n    fn new(encoder: Encoder<'a, W>, on_finish: F) -> Self {\n        AutoFinishEncoder {\n            encoder: Some(encoder),\n            on_finish: 
Some(on_finish),\n        }\n    }\n\n    /// Acquires a reference to the underlying writer.\n    pub fn get_ref(&self) -> &W {\n        self.encoder.as_ref().unwrap().get_ref()\n    }\n\n    /// Acquires a mutable reference to the underlying writer.\n    ///\n    /// Note that mutation of the writer may result in surprising results if\n    /// this encoder is continued to be used.\n    ///\n    /// Mostly used for testing purposes.\n    pub fn get_mut(&mut self) -> &mut W {\n        self.encoder.as_mut().unwrap().get_mut()\n    }\n}\n\nimpl<W: Write, F: FnMut(io::Result<W>)> Drop for AutoFinishEncoder<'_, W, F> {\n    fn drop(&mut self) {\n        let result = self.encoder.take().unwrap().finish();\n        if let Some(mut on_finish) = self.on_finish.take() {\n            on_finish(result);\n        }\n    }\n}\n\nimpl<W: Write, F: FnMut(io::Result<W>)> Write for AutoFinishEncoder<'_, W, F> {\n    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {\n        self.encoder.as_mut().unwrap().write(buf)\n    }\n\n    fn flush(&mut self) -> io::Result<()> {\n        self.encoder.as_mut().unwrap().flush()\n    }\n}\n\nimpl<W: Write> Encoder<'static, W> {\n    /// Creates a new encoder.\n    ///\n    /// `level`: compression level (1-22).\n    ///\n    /// A level of `0` uses zstd's default (currently `3`).\n    pub fn new(writer: W, level: i32) -> io::Result<Self> {\n        Self::with_dictionary(writer, level, &[])\n    }\n\n    /// Creates a new encoder, using an existing dictionary.\n    ///\n    /// (Provides better compression ratio for small files,\n    /// but requires the dictionary to be present during decompression.)\n    ///\n    /// A level of `0` uses zstd's default (currently `3`).\n    pub fn with_dictionary(\n        writer: W,\n        level: i32,\n        dictionary: &[u8],\n    ) -> io::Result<Self> {\n        let encoder = raw::Encoder::with_dictionary(level, dictionary)?;\n        Ok(Self::with_encoder(writer, encoder))\n    }\n}\n\nimpl<'a, W: 
Write> Encoder<'a, W> {\n    /// Creates a new encoder from a prepared zio writer.\n    pub fn with_writer(writer: zio::Writer<W, raw::Encoder<'a>>) -> Self {\n        Self { writer }\n    }\n\n    /// Creates a new encoder from the given `Write` and raw encoder.\n    pub fn with_encoder(writer: W, encoder: raw::Encoder<'a>) -> Self {\n        let writer = zio::Writer::new(writer, encoder);\n        Self::with_writer(writer)\n    }\n\n    /// Creates an encoder that uses the provided context to compress a stream.\n    pub fn with_context(\n        writer: W,\n        context: &'a mut zstd_safe::CCtx<'static>,\n    ) -> Self {\n        let encoder = raw::Encoder::with_context(context);\n        Self::with_encoder(writer, encoder)\n    }\n\n    /// Creates a new encoder, using an existing prepared `EncoderDictionary`.\n    ///\n    /// (Provides better compression ratio for small files,\n    /// but requires the dictionary to be present during decompression.)\n    pub fn with_prepared_dictionary<'b>(\n        writer: W,\n        dictionary: &EncoderDictionary<'b>,\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let encoder = raw::Encoder::with_prepared_dictionary(dictionary)?;\n        Ok(Self::with_encoder(writer, encoder))\n    }\n\n    /// Creates a new encoder, using a ref prefix\n    pub fn with_ref_prefix<'b>(\n        writer: W,\n        level: i32,\n        ref_prefix: &'b [u8],\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let encoder = raw::Encoder::with_ref_prefix(level, ref_prefix)?;\n        Ok(Self::with_encoder(writer, encoder))\n    }\n\n    /// Returns a wrapper around `self` that will finish the stream on drop.\n    pub fn auto_finish(self) -> AutoFinishEncoder<'a, W> {\n        AutoFinishEncoder {\n            encoder: Some(self),\n            on_finish: None,\n        }\n    }\n\n    /// Returns an encoder that will finish the stream on drop.\n    ///\n    /// Calls the given callback with the result 
from `finish()`. This runs during drop so it's\n    /// important that the provided callback doesn't panic.\n    pub fn on_finish<F: FnMut(io::Result<W>)>(\n        self,\n        f: F,\n    ) -> AutoFinishEncoder<'a, W, F> {\n        AutoFinishEncoder::new(self, f)\n    }\n\n    /// Acquires a reference to the underlying writer.\n    pub fn get_ref(&self) -> &W {\n        self.writer.writer()\n    }\n\n    /// Acquires a mutable reference to the underlying writer.\n    ///\n    /// Note that mutation of the writer may result in surprising results if\n    /// this encoder is continued to be used.\n    pub fn get_mut(&mut self) -> &mut W {\n        self.writer.writer_mut()\n    }\n\n    /// **Required**: Finishes the stream.\n    ///\n    /// You *need* to finish the stream when you're done writing, either with\n    /// this method or with [`try_finish(self)`](#method.try_finish).\n    ///\n    /// This returns the inner writer in case you need it.\n    ///\n    /// To get back `self` in case an error happened, use `try_finish`.\n    ///\n    /// **Note**: If you don't want (or can't) call `finish()` manually after\n    ///           writing your data, consider using `auto_finish()` to get an\n    ///           `AutoFinishEncoder`.\n    pub fn finish(self) -> io::Result<W> {\n        self.try_finish().map_err(|(_, err)| err)\n    }\n\n    /// **Required**: Attempts to finish the stream.\n    ///\n    /// You *need* to finish the stream when you're done writing, either with\n    /// this method or with [`finish(self)`](#method.finish).\n    ///\n    /// This returns the inner writer if the finish was successful, or the\n    /// object plus an error if it wasn't.\n    ///\n    /// `write` on this object will panic after `try_finish` has been called,\n    /// even if it fails.\n    pub fn try_finish(mut self) -> Result<W, (Self, io::Error)> {\n        match self.writer.finish() {\n            // Return the writer, because why not\n            Ok(()) => 
Ok(self.writer.into_inner().0),\n            Err(e) => Err((self, e)),\n        }\n    }\n\n    /// Attempts to finish the stream.\n    ///\n    /// You *need* to finish the stream when you're done writing, either with\n    /// this method or with [`finish(self)`](#method.finish).\n    pub fn do_finish(&mut self) -> io::Result<()> {\n        self.writer.finish()\n    }\n\n    /// Return a recommendation for the size of data to write at once.\n    pub fn recommended_input_size() -> usize {\n        zstd_safe::CCtx::in_size()\n    }\n\n    crate::encoder_common!(writer);\n}\n\nimpl<'a, W: Write> Write for Encoder<'a, W> {\n    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {\n        self.writer.write(buf)\n    }\n\n    fn flush(&mut self) -> io::Result<()> {\n        self.writer.flush()\n    }\n}\n\nimpl<W: Write> Decoder<'static, W> {\n    /// Creates a new decoder.\n    pub fn new(writer: W) -> io::Result<Self> {\n        Self::with_dictionary(writer, &[])\n    }\n\n    /// Creates a new decoder, using an existing dictionary.\n    ///\n    /// (Provides better compression ratio for small files,\n    /// but requires the dictionary to be present during decompression.)\n    pub fn with_dictionary(writer: W, dictionary: &[u8]) -> io::Result<Self> {\n        let decoder = raw::Decoder::with_dictionary(dictionary)?;\n        Ok(Self::with_decoder(writer, decoder))\n    }\n}\n\nimpl<'a, W: Write> Decoder<'a, W> {\n    /// Creates a new decoder around the given prepared zio writer.\n    ///\n    /// # Examples\n    ///\n    /// ```rust\n    /// fn wrap<W: std::io::Write>(writer: W) -> zstd::stream::write::Decoder<'static, W> {\n    ///   let decoder = zstd::stream::raw::Decoder::new().unwrap();\n    ///   let writer = zstd::stream::zio::Writer::new(writer, decoder);\n    ///   zstd::stream::write::Decoder::with_writer(writer)\n    /// }\n    /// ```\n    pub fn with_writer(writer: zio::Writer<W, raw::Decoder<'a>>) -> Self {\n        Decoder { writer }\n    }\n\n    
/// Creates a new decoder around the given `Write` and raw decoder.\n    pub fn with_decoder(writer: W, decoder: raw::Decoder<'a>) -> Self {\n        let writer = zio::Writer::new(writer, decoder);\n        Decoder { writer }\n    }\n\n    /// Creates a decoder that uses the provided context to decompress a stream.\n    pub fn with_context(\n        writer: W,\n        context: &'a mut zstd_safe::DCtx<'static>,\n    ) -> Self {\n        let encoder = raw::Decoder::with_context(context);\n        Self::with_decoder(writer, encoder)\n    }\n\n    /// Creates a new decoder, using an existing prepared `DecoderDictionary`.\n    ///\n    /// (Provides better compression ratio for small files,\n    /// but requires the dictionary to be present during decompression.)\n    pub fn with_prepared_dictionary<'b>(\n        writer: W,\n        dictionary: &DecoderDictionary<'b>,\n    ) -> io::Result<Self>\n    where\n        'b: 'a,\n    {\n        let decoder = raw::Decoder::with_prepared_dictionary(dictionary)?;\n        Ok(Self::with_decoder(writer, decoder))\n    }\n\n    /// Acquires a reference to the underlying writer.\n    pub fn get_ref(&self) -> &W {\n        self.writer.writer()\n    }\n\n    /// Acquires a mutable reference to the underlying writer.\n    ///\n    /// Note that mutation of the writer may result in surprising results if\n    /// this decoder is continued to be used.\n    pub fn get_mut(&mut self) -> &mut W {\n        self.writer.writer_mut()\n    }\n\n    /// Returns the inner `Write`.\n    pub fn into_inner(self) -> W {\n        self.writer.into_inner().0\n    }\n\n    /// Return a recommendation for the size of data to write at once.\n    pub fn recommended_input_size() -> usize {\n        zstd_safe::DCtx::in_size()\n    }\n\n    /// Returns a wrapper around `self` that will flush the stream on drop.\n    pub fn auto_flush(self) -> AutoFlushDecoder<'a, W> {\n        AutoFlushDecoder {\n            decoder: Some(self),\n            on_flush: None,\n    
    }\n    }\n\n    /// Returns a decoder that will flush the stream on drop.\n    ///\n    /// Calls the given callback with the result from `flush()`. This runs during drop so it's\n    /// important that the provided callback doesn't panic.\n    pub fn on_flush<F: FnMut(io::Result<()>)>(\n        self,\n        f: F,\n    ) -> AutoFlushDecoder<'a, W, F> {\n        AutoFlushDecoder::new(self, f)\n    }\n\n    crate::decoder_common!(writer);\n}\n\nimpl<W: Write> Write for Decoder<'_, W> {\n    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {\n        self.writer.write(buf)\n    }\n\n    fn flush(&mut self) -> io::Result<()> {\n        self.writer.flush()\n    }\n}\n\nfn _assert_traits() {\n    fn _assert_send<T: Send>(_: T) {}\n\n    _assert_send(Decoder::new(Vec::new()));\n    _assert_send(Encoder::new(Vec::new(), 1));\n    _assert_send(Decoder::new(Vec::new()).unwrap().auto_flush());\n    _assert_send(Encoder::new(Vec::new(), 1).unwrap().auto_finish());\n}\n"
  },
  {
    "path": "src/stream/write/tests.rs",
    "content": "use std::io::{Cursor, Write};\nuse std::iter;\n\nuse partial_io::{PartialOp, PartialWrite};\n\nuse crate::stream::decode_all;\nuse crate::stream::write::{Decoder, Encoder};\n\n#[test]\nfn test_cycle() {\n    let input = b\"Abcdefghabcdefgh\";\n\n    let buffer = Cursor::new(Vec::new());\n    let mut encoder = Encoder::new(buffer, 1).unwrap();\n    encoder.write_all(input).unwrap();\n    let encoded = encoder.finish().unwrap().into_inner();\n\n    // println!(\"Encoded: {:?}\", encoded);\n\n    let buffer = Cursor::new(Vec::new());\n    let mut decoder = Decoder::new(buffer).unwrap();\n    decoder.write_all(&encoded).unwrap();\n    decoder.flush().unwrap();\n    let decoded = decoder.into_inner().into_inner();\n\n    assert_eq!(input, &decoded[..]);\n}\n\n/// Test that flush after a partial write works successfully without\n/// corrupting the frame. This test is in this module because it checks\n/// internal implementation details.\n#[test]\nfn test_partial_write_flush() {\n    let input = vec![b'b'; 128 * 1024];\n    let mut z = setup_partial_write(&input);\n\n    // flush shouldn't corrupt the stream\n    z.flush().unwrap();\n\n    let buf = z.finish().unwrap().into_inner();\n    assert_eq!(&decode_all(&buf[..]).unwrap(), &input);\n}\n\n/// Test that finish after a partial write works successfully without\n/// corrupting the frame. 
This test is in this module because it checks\n/// internal implementation details.\n#[test]\nfn test_partial_write_finish() {\n    let input = vec![b'b'; 128 * 1024];\n    let z = setup_partial_write(&input);\n\n    // finish shouldn't corrupt the stream\n    let buf = z.finish().unwrap().into_inner();\n    assert_eq!(&decode_all(&buf[..]).unwrap(), &input);\n}\n\nfn setup_partial_write(input_data: &[u8]) -> Encoder<PartialWrite<Vec<u8>>> {\n    let buf =\n        PartialWrite::new(Vec::new(), iter::repeat(PartialOp::Limited(1)));\n    let mut z = Encoder::new(buf, 1).unwrap();\n\n    // Fill in enough data to make sure the buffer gets written out.\n    z.write(input_data).unwrap();\n\n    {\n        let inner = &mut z.writer;\n        // At this point, the internal buffer in z should have some data.\n        assert_ne!(inner.offset(), inner.buffer().len());\n    }\n\n    z\n}\n"
  },
  {
    "path": "src/stream/zio/mod.rs",
    "content": "//! Wrappers around raw operations implementing `std::io::{Read, Write}`.\n\nmod reader;\nmod writer;\n\npub use self::reader::Reader;\npub use self::writer::Writer;\n"
  },
  {
    "path": "src/stream/zio/reader.rs",
    "content": "use std::io::{self, BufRead, Read};\n\nuse crate::stream::raw::{InBuffer, Operation, OutBuffer};\n\n// [ reader -> zstd ] -> output\n/// Implements the [`Read`] API around an [`Operation`].\n///\n/// This can be used to wrap a raw in-memory operation in a read-focused API.\n///\n/// It can wrap either a compression or decompression operation, and pulls\n/// input data from a wrapped `Read`.\npub struct Reader<R, D> {\n    reader: R,\n    operation: D,\n\n    state: State,\n\n    single_frame: bool,\n    finished_frame: bool,\n}\n\nenum State {\n    // Still actively reading from the inner `Read`\n    Reading,\n    // We reached EOF from the inner `Read`, now flushing.\n    PastEof,\n    // We are fully done, nothing can be read.\n    Finished,\n}\n\nimpl<R, D> Reader<R, D> {\n    /// Creates a new `Reader`.\n    ///\n    /// `reader` will be used to pull input data for the given operation.\n    pub fn new(reader: R, operation: D) -> Self {\n        Reader {\n            reader,\n            operation,\n            state: State::Reading,\n            single_frame: false,\n            finished_frame: false,\n        }\n    }\n\n    /// Sets `self` to stop after the first decoded frame.\n    pub fn set_single_frame(&mut self) {\n        self.single_frame = true;\n    }\n\n    /// Returns a mutable reference to the underlying operation.\n    pub fn operation_mut(&mut self) -> &mut D {\n        &mut self.operation\n    }\n\n    /// Returns a mutable reference to the underlying reader.\n    pub fn reader_mut(&mut self) -> &mut R {\n        &mut self.reader\n    }\n\n    /// Returns a reference to the underlying reader.\n    pub fn reader(&self) -> &R {\n        &self.reader\n    }\n\n    /// Returns the inner reader.\n    pub fn into_inner(self) -> R {\n        self.reader\n    }\n\n    /// Flush any internal buffer.\n    ///\n    /// For encoders, this ensures all input consumed so far is compressed.\n    pub fn flush(&mut self, output: &mut [u8]) -> 
io::Result<usize>\n    where\n        D: Operation,\n    {\n        self.operation.flush(&mut OutBuffer::around(output))\n    }\n}\n// Read and retry on Interrupted errors.\nfn fill_buf<R>(reader: &mut R) -> io::Result<&[u8]>\nwhere\n    R: BufRead,\n{\n    // This doesn't work right now because of the borrow-checker.\n    // When it can be made to compile, it would allow Reader to automatically\n    // retry on `Interrupted` error.\n    /*\n    loop {\n        match reader.fill_buf() {\n            Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}\n            otherwise => return otherwise,\n        }\n    }\n    */\n\n    // Workaround for now\n    let res = reader.fill_buf()?;\n\n    // eprintln!(\"Filled buffer: {:?}\", res);\n\n    Ok(res)\n}\n\nimpl<R, D> Read for Reader<R, D>\nwhere\n    R: BufRead,\n    D: Operation,\n{\n    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {\n        // Keep trying until _something_ has been written.\n        let mut first = true;\n        loop {\n            match self.state {\n                State::Reading => {\n                    let (bytes_read, bytes_written) = {\n                        // Start with a fresh pool of un-processed data.\n                        // This is the only line that can return an interruption error.\n                        let input = if first {\n                            // eprintln!(\"First run, no input coming.\");\n                            b\"\"\n                        } else {\n                            fill_buf(&mut self.reader)?\n                        };\n\n                        // eprintln!(\"Input = {:?}\", input);\n\n                        // It's possible we don't have any new data to read.\n                        // (In this case we may still have zstd's own buffer to clear.)\n                        if !first && input.is_empty() {\n                            self.state = State::PastEof;\n                            continue;\n                        
}\n                        first = false;\n\n                        let mut src = InBuffer::around(input);\n                        let mut dst = OutBuffer::around(buf);\n\n                        // We don't want empty input (from first=true) to cause a frame\n                        // re-initialization.\n                        if self.finished_frame && !input.is_empty() {\n                            // eprintln!(\"!! Reigniting !!\");\n                            self.operation.reinit()?;\n                            self.finished_frame = false;\n                        }\n\n                        // Phase 1: feed input to the operation\n                        let hint = self.operation.run(&mut src, &mut dst)?;\n                        // eprintln!(\n                        //     \"Hint={} Just run our operation:\\n In={:?}\\n Out={:?}\",\n                        //     hint, src, dst\n                        // );\n\n                        if hint == 0 {\n                            // In practice this only happens when decoding, when we just finished\n                            // reading a frame.\n                            self.finished_frame = true;\n                            if self.single_frame {\n                                self.state = State::Finished;\n                            }\n                        }\n\n                        // eprintln!(\"Output: {:?}\", dst);\n\n                        (src.pos(), dst.pos())\n                    };\n\n                    self.reader.consume(bytes_read);\n\n                    if bytes_written > 0 {\n                        return Ok(bytes_written);\n                    }\n\n                    // We need more data! Try again!\n                }\n                State::PastEof => {\n                    let mut dst = OutBuffer::around(buf);\n\n                    // We already sent all the input we could get to zstd. 
Time to flush out the\n                    // buffer and be done with it.\n\n                    // Phase 2: flush out the operation's buffer\n                    // Keep calling `finish()` until the buffer is empty.\n                    let hint = self\n                        .operation\n                        .finish(&mut dst, self.finished_frame)?;\n                    // eprintln!(\"Hint: {} ; Output: {:?}\", hint, dst);\n                    if hint == 0 {\n                        // This indicates that the footer is complete.\n                        // This is the only way to terminate the stream cleanly.\n                        self.state = State::Finished;\n                    }\n\n                    return Ok(dst.pos());\n                }\n                State::Finished => {\n                    return Ok(0);\n                }\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::Reader;\n    use std::io::{Cursor, Read};\n\n    #[test]\n    fn test_noop() {\n        use crate::stream::raw::NoOp;\n\n        let input = b\"AbcdefghAbcdefgh.\";\n\n        // Test reader\n        let mut output = Vec::new();\n        {\n            let mut reader = Reader::new(Cursor::new(input), NoOp);\n            reader.read_to_end(&mut output).unwrap();\n        }\n        assert_eq!(&output, input);\n    }\n\n    #[test]\n    fn test_compress() {\n        use crate::stream::raw::Encoder;\n\n        let input = b\"AbcdefghAbcdefgh.\";\n\n        // Test reader\n        let mut output = Vec::new();\n        {\n            let mut reader =\n                Reader::new(Cursor::new(input), Encoder::new(1).unwrap());\n            reader.read_to_end(&mut output).unwrap();\n        }\n        // eprintln!(\"{:?}\", output);\n        let decoded = crate::decode_all(&output[..]).unwrap();\n        assert_eq!(&decoded, input);\n    }\n}\n"
  },
  {
    "path": "src/stream/zio/writer.rs",
    "content": "use std::io::{self, Write};\n\nuse crate::stream::raw::{InBuffer, Operation, OutBuffer};\n\n// input -> [ zstd -> buffer -> writer ]\n\n/// Implements the [`Write`] API around an [`Operation`].\n///\n/// This can be used to wrap a raw in-memory operation in a write-focused API.\n///\n/// It can be used with either compression or decompression, and forwards the\n/// output to a wrapped `Write`.\npub struct Writer<W, D> {\n    /// Either an encoder or a decoder.\n    operation: D,\n\n    /// Where we send the output of the operation.\n    writer: W,\n\n    /// Offset into the buffer\n    ///\n    /// Only things after this matter. Things before have already been sent to the writer.\n    offset: usize,\n\n    /// Output buffer\n    ///\n    /// Where the operation writes, before it gets flushed to the writer\n    buffer: Vec<u8>,\n\n    // When `true`, indicates that nothing should be added to the buffer.\n    // All that's left if to empty the buffer.\n    finished: bool,\n\n    /// When `true`, the operation just finished a frame.\n    ///\n    /// Only happens when decompressing.\n    /// The context needs to be re-initialized to process the next frame.\n    finished_frame: bool,\n}\n\nimpl<W, D> Writer<W, D>\nwhere\n    W: Write,\n    D: Operation,\n{\n    /// Creates a new `Writer` with a fixed buffer capacity of 32KB\n    ///\n    /// All output from the given operation will be forwarded to `writer`.\n    pub fn new(writer: W, operation: D) -> Self {\n        // 32KB buffer? 
That's what flate2 uses\n        Self::new_with_capacity(writer, operation, 32 * 1024)\n    }\n\n    /// Creates a new `Writer` with user defined capacity.\n    ///\n    /// All output from the given operation will be forwarded to `writer`.\n    pub fn new_with_capacity(\n        writer: W,\n        operation: D,\n        capacity: usize,\n    ) -> Self {\n        Self::with_output_buffer(\n            Vec::with_capacity(capacity),\n            writer,\n            operation,\n        )\n    }\n\n    /// Creates a new `Writer` using the given output buffer.\n    ///\n    /// The output buffer _must_ have pre-allocated capacity (its capacity will not be changed after).\n    ///\n    /// Usually you would use `Vec::with_capacity(desired_buffer_size)`.\n    pub fn with_output_buffer(\n        output_buffer: Vec<u8>,\n        writer: W,\n        operation: D,\n    ) -> Self {\n        Writer {\n            writer,\n            operation,\n\n            offset: 0,\n            // 32KB buffer? 
That's what flate2 uses\n            buffer: output_buffer,\n\n            finished: false,\n            finished_frame: false,\n        }\n    }\n\n    /// Ends the stream.\n    ///\n    /// This *must* be called after all data has been written to finish the\n    /// stream.\n    ///\n    /// If you forget to call this and just drop the `Writer`, you *will* have\n    /// an incomplete output.\n    ///\n    /// Keep calling it until it returns `Ok(())`, then don't call it again.\n    pub fn finish(&mut self) -> io::Result<()> {\n        loop {\n            // Keep trying until we're really done.\n            self.write_from_offset()?;\n\n            // At this point the buffer has been fully written out.\n\n            if self.finished {\n                return Ok(());\n            }\n\n            // Let's fill this buffer again!\n\n            let finished_frame = self.finished_frame;\n            let hint =\n                self.with_buffer(|dst, op| op.finish(dst, finished_frame));\n            self.offset = 0;\n            // println!(\"Hint: {:?}\\nOut:{:?}\", hint, &self.buffer);\n\n            // We return here if zstd had a problem.\n            // Could happen with invalid data, ...\n            let hint = hint?;\n\n            if hint != 0 && self.buffer.is_empty() {\n                // This happens if we are decoding an incomplete frame.\n                return Err(io::Error::new(\n                    io::ErrorKind::UnexpectedEof,\n                    \"incomplete frame\",\n                ));\n            }\n\n            // println!(\"Finishing {}, {}\", bytes_written, hint);\n\n            self.finished = hint == 0;\n        }\n    }\n\n    /// Run the given closure on `self.buffer`.\n    ///\n    /// The buffer will be cleared, and made available wrapped in an `OutBuffer`.\n    fn with_buffer<F, T>(&mut self, f: F) -> T\n    where\n        F: FnOnce(&mut OutBuffer<'_, Vec<u8>>, &mut D) -> T,\n    {\n        self.buffer.clear();\n        let mut 
output = OutBuffer::around(&mut self.buffer);\n        // eprintln!(\"Output: {:?}\", output);\n        f(&mut output, &mut self.operation)\n    }\n\n    /// Attempt to write `self.buffer` to the wrapped writer.\n    ///\n    /// Returns `Ok(())` once all the buffer has been written.\n    fn write_from_offset(&mut self) -> io::Result<()> {\n        // The code looks a lot like `write_all`, but keeps track of what has\n        // been written in case we're interrupted.\n        while self.offset < self.buffer.len() {\n            match self.writer.write(&self.buffer[self.offset..]) {\n                Ok(0) => {\n                    return Err(io::Error::new(\n                        io::ErrorKind::WriteZero,\n                        \"writer will not accept any more data\",\n                    ))\n                }\n                Ok(n) => self.offset += n,\n                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => (),\n                Err(e) => return Err(e),\n            }\n        }\n        Ok(())\n    }\n\n    /// Return the wrapped `Writer` and `Operation`.\n    ///\n    /// Careful: if you call this before calling [`Writer::finish()`], the\n    /// output may be incomplete.\n    pub fn into_inner(self) -> (W, D) {\n        (self.writer, self.operation)\n    }\n\n    /// Gives a reference to the inner writer.\n    pub fn writer(&self) -> &W {\n        &self.writer\n    }\n\n    /// Gives a mutable reference to the inner writer.\n    pub fn writer_mut(&mut self) -> &mut W {\n        &mut self.writer\n    }\n\n    /// Gives a reference to the inner operation.\n    pub fn operation(&self) -> &D {\n        &self.operation\n    }\n\n    /// Gives a mutable reference to the inner operation.\n    pub fn operation_mut(&mut self) -> &mut D {\n        &mut self.operation\n    }\n\n    /// Returns the offset in the current buffer. 
Only useful for debugging.\n    #[cfg(test)]\n    pub fn offset(&self) -> usize {\n        self.offset\n    }\n\n    /// Returns the current buffer. Only useful for debugging.\n    #[cfg(test)]\n    pub fn buffer(&self) -> &[u8] {\n        &self.buffer\n    }\n}\n\nimpl<W, D> Write for Writer<W, D>\nwhere\n    W: Write,\n    D: Operation,\n{\n    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {\n        if self.finished {\n            return Err(io::Error::new(\n                io::ErrorKind::Other,\n                \"encoder is finished\",\n            ));\n        }\n        // Keep trying until _something_ has been consumed.\n        // As soon as some input has been taken, we cannot afford\n        // to take any chance: if an error occurs, the user couldn't know\n        // that some data _was_ successfully written.\n        loop {\n            // First, write any pending data from `self.buffer`.\n            self.write_from_offset()?;\n            // At this point `self.buffer` can safely be discarded.\n\n            // Support writing concatenated frames by re-initializing the\n            // context.\n            if self.finished_frame {\n                self.operation.reinit()?;\n                self.finished_frame = false;\n            }\n\n            let mut src = InBuffer::around(buf);\n            let hint = self.with_buffer(|dst, op| op.run(&mut src, dst));\n            let bytes_read = src.pos;\n\n            // eprintln!(\n            //     \"Write Hint: {:?}\\n src: {:?}\\n dst: {:?}\",\n            //     hint, src, self.buffer\n            // );\n\n            self.offset = 0;\n            let hint = hint?;\n\n            if hint == 0 {\n                self.finished_frame = true;\n            }\n\n            // As we said, as soon as we've consumed something, return.\n            if bytes_read > 0 || buf.is_empty() {\n                // println!(\"Returning {}\", bytes_read);\n                return Ok(bytes_read);\n            }\n      
  }\n    }\n\n    fn flush(&mut self) -> io::Result<()> {\n        let mut finished = self.finished;\n        loop {\n            // If the output is blocked or has an error, return now.\n            self.write_from_offset()?;\n\n            if finished {\n                break;\n            }\n\n            let hint = self.with_buffer(|dst, op| op.flush(dst));\n\n            self.offset = 0;\n            let hint = hint?;\n\n            finished = hint == 0;\n        }\n\n        self.writer.flush()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::Writer;\n    use std::io::Write;\n\n    #[test]\n    fn test_noop() {\n        use crate::stream::raw::NoOp;\n\n        let input = b\"AbcdefghAbcdefgh.\";\n\n        // Test writer\n        let mut output = Vec::new();\n        {\n            let mut writer = Writer::new(&mut output, NoOp);\n            writer.write_all(input).unwrap();\n            writer.finish().unwrap();\n        }\n        assert_eq!(&output, input);\n    }\n\n    #[test]\n    fn test_compress() {\n        use crate::stream::raw::Encoder;\n\n        let input = b\"AbcdefghAbcdefgh.\";\n\n        // Test writer\n        let mut output = Vec::new();\n        {\n            let mut writer =\n                Writer::new(&mut output, Encoder::new(1).unwrap());\n            writer.write_all(input).unwrap();\n            writer.finish().unwrap();\n        }\n        // println!(\"Output: {:?}\", output);\n        let decoded = crate::decode_all(&output[..]).unwrap();\n        assert_eq!(&decoded, input);\n    }\n\n    #[test]\n    fn test_compress_with_capacity() {\n        use crate::stream::raw::Encoder;\n\n        let input = b\"AbcdefghAbcdefgh.\";\n\n        // Test writer\n        let mut output = Vec::new();\n        {\n            let mut writer = Writer::new_with_capacity(\n                &mut output,\n                Encoder::new(1).unwrap(),\n                64,\n            );\n            assert_eq!(writer.buffer.capacity(), 64);\n     
       writer.write_all(input).unwrap();\n            writer.finish().unwrap();\n        }\n        let decoded = crate::decode_all(&output[..]).unwrap();\n        assert_eq!(&decoded, input);\n    }\n\n    #[test]\n    fn test_decompress() {\n        use crate::stream::raw::Decoder;\n\n        let input = b\"AbcdefghAbcdefgh.\";\n        let compressed = crate::encode_all(&input[..], 1).unwrap();\n\n        // Test writer\n        let mut output = Vec::new();\n        {\n            let mut writer = Writer::new(&mut output, Decoder::new().unwrap());\n            writer.write_all(&compressed).unwrap();\n            writer.finish().unwrap();\n        }\n        // println!(\"Output: {:?}\", output);\n        assert_eq!(&output, input);\n    }\n\n    #[test]\n    fn test_decompress_with_capacity() {\n        use crate::stream::raw::Decoder;\n\n        let input = b\"AbcdefghAbcdefgh.\";\n        let compressed = crate::encode_all(&input[..], 1).unwrap();\n\n        // Test writer\n        let mut output = Vec::new();\n        {\n            let mut writer = Writer::new_with_capacity(\n                &mut output,\n                Decoder::new().unwrap(),\n                64,\n            );\n            assert_eq!(writer.buffer.capacity(), 64);\n            writer.write_all(&compressed).unwrap();\n            writer.finish().unwrap();\n        }\n        assert_eq!(&output, input);\n    }\n}\n"
  },
  {
    "path": "tests/issue_182.rs",
    "content": "const TEXT: &[u8] = include_bytes!(\"../assets/example.txt\");\n\n#[test]\n#[should_panic]\nfn test_issue_182() {\n    use std::io::BufRead;\n\n    let compressed = zstd::encode_all(TEXT, 3).unwrap();\n    let truncated = &compressed[..compressed.len() / 2];\n\n    let rdr = zstd::Decoder::new(truncated).unwrap();\n    let rdr = std::io::BufReader::new(rdr);\n    for line in rdr.lines() {\n        line.unwrap();\n    }\n}\n"
  },
  {
    "path": "zstd-safe/Cargo.toml",
    "content": "[package]\nauthors = [\"Alexandre Bury <alexandre.bury@gmail.com>\"]\nname = \"zstd-safe\"\nbuild = \"build.rs\"\nversion = \"7.2.4\"\ndescription = \"Safe low-level bindings for the zstd compression library.\"\nkeywords = [\"zstd\", \"zstandard\", \"compression\"]\ncategories = [\"api-bindings\", \"compression\"]\nrepository = \"https://github.com/gyscos/zstd-rs\"\nlicense = \"BSD-3-Clause\"\nreadme = \"Readme.md\"\nedition = \"2018\"\nrust-version = \"1.64\"\nexclude = [\"update_consts.sh\"]\n\n[package.metadata.docs.rs]\nfeatures = [\"experimental\", \"arrays\", \"std\", \"zdict_builder\", \"doc-cfg\"]\n\n[dependencies]\nzstd-sys = { path = \"zstd-sys\", version = \"2.0.15\", default-features = false }\n\n[features]\ndefault = [\"legacy\", \"arrays\", \"zdict_builder\"]\n\nbindgen = [\"zstd-sys/bindgen\"]\ndebug = [\"zstd-sys/debug\"]\nexperimental = [\"zstd-sys/experimental\"]\nlegacy = [\"zstd-sys/legacy\"]\npkg-config = [\"zstd-sys/pkg-config\"]\nstd = [\"zstd-sys/std\"] # Implements WriteBuf for std types like Cursor and Vec.\nzstdmt = [\"zstd-sys/zstdmt\"]\nthin = [\"zstd-sys/thin\"]\narrays = []\nno_asm = [\"zstd-sys/no_asm\"]\ndoc-cfg = []\nzdict_builder = [\"zstd-sys/zdict_builder\"]\nseekable = [\"zstd-sys/seekable\"]\n\n# These two are for cross-language LTO.\n# Will only work if `clang` is used to build the C library.\nfat-lto = [\"zstd-sys/fat-lto\"]\nthin-lto = [\"zstd-sys/thin-lto\"]\n\n[lints.rust]\nnon_upper_case_globals = \"allow\"\n"
  },
  {
    "path": "zstd-safe/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2016, Alexandre Bury\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n   contributors may be used to endorse or promote products derived from\n   this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "zstd-safe/Readme.md",
    "content": "# zstd-safe\n\nThis is a thin, no-std, safe abstraction built on top of the bindings from [zstd-sys].\n\nIt is close to a 1-for-1 mapping to the C functions, but uses rust types like slices instead of pointers and lengths.\n\nFor a more comfortable higher-level library (with `Read`/`Write` implementations), see [zstd-rs].\n\n[zstd-sys]: https://github.com/gyscos/zstd-rs/tree/main/zstd-safe/zstd-sys\n[zstd-rs]: https://github.com/gyscos/zstd-rs\n"
  },
  {
    "path": "zstd-safe/build.rs",
    "content": "fn main() {\n    // Force the `std` feature in some cases\n    let target_arch =\n        std::env::var(\"CARGO_CFG_TARGET_ARCH\").unwrap_or_default();\n    let target_os = std::env::var(\"CARGO_CFG_TARGET_OS\").unwrap_or_default();\n\n    if target_arch == \"wasm32\" || target_os == \"hermit\" {\n        println!(\"cargo:rustc-cfg=feature=\\\"std\\\"\");\n    }\n}\n"
  },
  {
    "path": "zstd-safe/fuzz/.gitignore",
    "content": "target\ncorpus\nartifacts\ncoverage\n"
  },
  {
    "path": "zstd-safe/fuzz/Cargo.toml",
    "content": "[package]\nname = \"zstd-safe-fuzz\"\nversion = \"0.0.0\"\npublish = false\nedition = \"2018\"\n\n[package.metadata]\ncargo-fuzz = true\n\n[package.metadata.docs.rs]\nfeatures = [\"std\"]\n\n[dependencies]\nlibfuzzer-sys = \"0.4\"\nzstd-sys = { path = \"../zstd-sys\", version = \"2.0.10\", default-features = false }\n\n[features]\nstd = [\"zstd-sys/std\"]\n\n[dependencies.zstd-safe]\npath = \"..\"\n\n[[bin]]\nname = \"zstd_fuzzer\"\npath = \"fuzz_targets/zstd_fuzzer.rs\"\ntest = false\ndoc = false\nbench = false\n"
  },
  {
    "path": "zstd-safe/fuzz/fuzz_targets/zstd_fuzzer.rs",
    "content": "#![no_main]\n\nextern crate zstd_safe;\nuse libfuzzer_sys::fuzz_target;\n\nfuzz_target!(|data: &[u8]| {\n    // Generate random sized buffer\n    let buffer_size = std::cmp::min(data.len() * 2, 2048);\n    let mut buffer = vec![0u8; buffer_size];\n\n    // Fuzz compression and decompression\n    for level in 0..=20 {\n        if let Ok(written) = zstd_safe::compress(&mut buffer[..], data, level) {\n            let compressed = &buffer[..written];\n            let mut decompressed = vec![0u8; buffer_size];\n            let _ = zstd_safe::decompress(&mut decompressed[..], compressed).unwrap_or_else(|_| 0);\n        }\n    }\n\n    // Fuzz compression and decompression with CCtx\n    let mut cctx = zstd_safe::CCtx::default();\n    if let Ok(written) = cctx.compress(&mut buffer[..], data, 3) {\n        let compressed = &buffer[..written];\n        let mut dctx = zstd_safe::DCtx::default();\n        let mut decompressed = vec![0u8; buffer_size];\n        let _ = dctx.decompress(&mut decompressed[..], compressed).unwrap_or_else(|_| 0);\n    }\n\n    // Fuzz compression and decompression on dict\n    let dict = b\"sample dictionary for zstd fuzzing\";\n    let mut cctx_dict = zstd_safe::CCtx::default();\n    if let Ok(written) = cctx_dict.compress_using_dict(&mut buffer[..], data, dict, 3) {\n        let compressed = &buffer[..written];\n\n        let mut dctx_dict = zstd_safe::DCtx::default();\n        let mut decompressed = vec![0u8; buffer_size];\n        let _ = dctx_dict.decompress_using_dict(&mut decompressed[..], compressed, dict).unwrap_or_else(|_| 0);\n    }\n\n    // Fuzz compression and decompression with streaming\n    let mut cctx_stream = zstd_safe::CCtx::default();\n    let mut dctx_stream = zstd_safe::DCtx::default();\n    let mut in_buffer = zstd_safe::InBuffer::around(data);\n    let mut out_buffer = zstd_safe::OutBuffer::around(&mut buffer[..]);\n\n    if let Ok(_) = cctx_stream.compress_stream(&mut out_buffer, &mut in_buffer) {\n        
let mut decompressed_stream = vec![0u8; buffer_size];\n        let mut out_buffer_stream = zstd_safe::OutBuffer::around(&mut decompressed_stream[..]);\n        let mut in_buffer_stream = zstd_safe::InBuffer::around(out_buffer.as_slice());\n        let _ = dctx_stream.decompress_stream(&mut out_buffer_stream, &mut in_buffer_stream).unwrap_or_else(|_| 0);\n    }\n\n    // Fuzz error handling and malformed input\n    let mut cctx_param = zstd_safe::CCtx::default();\n    if let Ok(_) = cctx_param.set_parameter(zstd_safe::CParameter::ChecksumFlag(true)) {\n        if let Ok(written) = cctx_param.compress2(&mut buffer[..], data) {\n            let compressed = &buffer[..written];\n            let mut dctx_param = zstd_safe::DCtx::default();\n            let mut decompressed = vec![0u8; buffer_size];\n            let _ = dctx_param.decompress(&mut decompressed[..], compressed).unwrap_or_else(|_| 0);\n        }\n    }\n    if let Ok(written) = zstd_safe::compress(&mut buffer[..], data, 3) {\n        let compressed = &mut buffer[..written];\n        for i in (0..compressed.len()).step_by(5) {\n            compressed[i] = compressed[i].wrapping_add(1);\n        }\n\n        let mut decompressed = vec![0u8; 2048];\n        let mut dctx = zstd_safe::DCtx::default();\n        let _ = dctx.decompress(&mut decompressed[..], compressed).unwrap_or_else(|_| 0);\n    }\n});\n"
  },
  {
    "path": "zstd-safe/src/constants.rs",
    "content": "// This file has been generated by ./update_consts.sh\npub const BLOCKSIZELOG_MAX: u32 = zstd_sys::ZSTD_BLOCKSIZELOG_MAX;\npub const BLOCKSIZE_MAX: u32 = zstd_sys::ZSTD_BLOCKSIZE_MAX;\npub const CLEVEL_DEFAULT: CompressionLevel = zstd_sys::ZSTD_CLEVEL_DEFAULT as CompressionLevel;\npub const CONTENTSIZE_ERROR: u64 = zstd_sys::ZSTD_CONTENTSIZE_ERROR as u64;\npub const CONTENTSIZE_UNKNOWN: u64 = zstd_sys::ZSTD_CONTENTSIZE_UNKNOWN as u64;\npub const MAGIC_DICTIONARY: u32 = zstd_sys::ZSTD_MAGIC_DICTIONARY;\npub const MAGICNUMBER: u32 = zstd_sys::ZSTD_MAGICNUMBER;\npub const MAGIC_SKIPPABLE_MASK: u32 = zstd_sys::ZSTD_MAGIC_SKIPPABLE_MASK;\npub const MAGIC_SKIPPABLE_START: u32 = zstd_sys::ZSTD_MAGIC_SKIPPABLE_START;\npub const VERSION_MAJOR: u32 = zstd_sys::ZSTD_VERSION_MAJOR;\npub const VERSION_MINOR: u32 = zstd_sys::ZSTD_VERSION_MINOR;\npub const VERSION_NUMBER: u32 = zstd_sys::ZSTD_VERSION_NUMBER;\npub const VERSION_RELEASE: u32 = zstd_sys::ZSTD_VERSION_RELEASE;\n"
  },
  {
    "path": "zstd-safe/src/constants_experimental.rs",
    "content": "// This file has been generated by ./update_consts.sh\npub const BLOCKSIZE_MAX_MIN: u32 = zstd_sys::ZSTD_BLOCKSIZE_MAX_MIN;\npub const BLOCKSPLITTER_LEVEL_MAX: u32 = zstd_sys::ZSTD_BLOCKSPLITTER_LEVEL_MAX;\npub const CHAINLOG_MAX_32: u32 = zstd_sys::ZSTD_CHAINLOG_MAX_32;\npub const CHAINLOG_MAX_64: u32 = zstd_sys::ZSTD_CHAINLOG_MAX_64;\npub const CHAINLOG_MIN: u32 = zstd_sys::ZSTD_CHAINLOG_MIN;\npub const FRAMEHEADERSIZE_MAX: u32 = zstd_sys::ZSTD_FRAMEHEADERSIZE_MAX;\npub const HASHLOG_MIN: u32 = zstd_sys::ZSTD_HASHLOG_MIN;\npub const LDM_BUCKETSIZELOG_MAX: u32 = zstd_sys::ZSTD_LDM_BUCKETSIZELOG_MAX;\npub const LDM_BUCKETSIZELOG_MIN: u32 = zstd_sys::ZSTD_LDM_BUCKETSIZELOG_MIN;\npub const LDM_HASHLOG_MIN: u32 = zstd_sys::ZSTD_LDM_HASHLOG_MIN;\npub const LDM_HASHRATELOG_MIN: u32 = zstd_sys::ZSTD_LDM_HASHRATELOG_MIN;\npub const LDM_MINMATCH_MAX: u32 = zstd_sys::ZSTD_LDM_MINMATCH_MAX;\npub const LDM_MINMATCH_MIN: u32 = zstd_sys::ZSTD_LDM_MINMATCH_MIN;\npub const MINMATCH_MAX: u32 = zstd_sys::ZSTD_MINMATCH_MAX;\npub const MINMATCH_MIN: u32 = zstd_sys::ZSTD_MINMATCH_MIN;\npub const OVERLAPLOG_MAX: u32 = zstd_sys::ZSTD_OVERLAPLOG_MAX;\npub const OVERLAPLOG_MIN: u32 = zstd_sys::ZSTD_OVERLAPLOG_MIN;\npub const SEARCHLOG_MIN: u32 = zstd_sys::ZSTD_SEARCHLOG_MIN;\npub const SKIPPABLEHEADERSIZE: u32 = zstd_sys::ZSTD_SKIPPABLEHEADERSIZE;\npub const SRCSIZEHINT_MIN: u32 = zstd_sys::ZSTD_SRCSIZEHINT_MIN;\npub const TARGETCBLOCKSIZE_MAX: u32 = zstd_sys::ZSTD_TARGETCBLOCKSIZE_MAX;\npub const TARGETCBLOCKSIZE_MIN: u32 = zstd_sys::ZSTD_TARGETCBLOCKSIZE_MIN;\npub const TARGETLENGTH_MAX: u32 = zstd_sys::ZSTD_TARGETLENGTH_MAX;\npub const TARGETLENGTH_MIN: u32 = zstd_sys::ZSTD_TARGETLENGTH_MIN;\npub const WINDOWLOG_LIMIT_DEFAULT: u32 = zstd_sys::ZSTD_WINDOWLOG_LIMIT_DEFAULT;\npub const WINDOWLOG_MAX_32: u32 = zstd_sys::ZSTD_WINDOWLOG_MAX_32;\npub const WINDOWLOG_MAX_64: u32 = zstd_sys::ZSTD_WINDOWLOG_MAX_64;\npub const WINDOWLOG_MIN: u32 = zstd_sys::ZSTD_WINDOWLOG_MIN;\n"
  },
  {
    "path": "zstd-safe/src/constants_seekable.rs",
    "content": "// This file has been generated by ./update_consts.sh\npub const SEEKABLE_FRAMEINDEX_TOOLARGE: u64 = zstd_sys::ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE as u64;\npub const SEEKABLE_MAGICNUMBER: u32 = zstd_sys::ZSTD_SEEKABLE_MAGICNUMBER;\npub const SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE: u32 = zstd_sys::ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE;\npub const SEEKABLE_MAXFRAMES: u32 = zstd_sys::ZSTD_SEEKABLE_MAXFRAMES;\npub const seekTableFooterSize: u32 = zstd_sys::ZSTD_seekTableFooterSize;\n"
  },
  {
    "path": "zstd-safe/src/lib.rs",
    "content": "#![no_std]\n//! Minimal safe wrapper around zstd-sys.\n//!\n//! This crates provides a minimal translation of the [zstd-sys] methods.\n//! For a more comfortable high-level library, see the [zstd] crate.\n//!\n//! [zstd-sys]: https://crates.io/crates/zstd-sys\n//! [zstd]: https://crates.io/crates/zstd\n//!\n//! Most of the functions here map 1-for-1 to a function from\n//! [the C zstd library][zstd-c] mentioned in their descriptions.\n//! Check the [source documentation][doc] for more information on their\n//! behaviour.\n//!\n//! [doc]: https://facebook.github.io/zstd/zstd_manual.html\n//! [zstd-c]: https://facebook.github.io/zstd/\n//!\n//! Features denoted as experimental in the C library are hidden behind an\n//! `experimental` feature.\n#![cfg_attr(feature = \"doc-cfg\", feature(doc_cfg))]\n\n// TODO: Use alloc feature instead to implement stuff for Vec\n// TODO: What about Cursor?\n#[cfg(feature = \"std\")]\nextern crate std;\n\n#[cfg(test)]\nmod tests;\n\n#[cfg(feature = \"seekable\")]\npub mod seekable;\n\n// Re-export zstd-sys\npub use zstd_sys;\n\n/// How to compress data.\npub use zstd_sys::ZSTD_strategy as Strategy;\n\n/// Frame progression state.\n#[cfg(feature = \"experimental\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\npub use zstd_sys::ZSTD_frameProgression as FrameProgression;\n\n/// Reset directive.\n// pub use zstd_sys::ZSTD_ResetDirective as ResetDirective;\nuse core::ffi::{c_char, c_int, c_ulonglong, c_void};\n\nuse core::marker::PhantomData;\nuse core::num::{NonZeroU32, NonZeroU64};\nuse core::ops::{Deref, DerefMut};\nuse core::ptr::NonNull;\nuse core::str;\n\ninclude!(\"constants.rs\");\n\n#[cfg(feature = \"experimental\")]\ninclude!(\"constants_experimental.rs\");\n\n#[cfg(feature = \"seekable\")]\ninclude!(\"constants_seekable.rs\");\n\n/// Represents the compression level used by zstd.\npub type CompressionLevel = i32;\n\n/// Represents a possible error from the zstd library.\npub type 
ErrorCode = usize;\n\n/// Wrapper result around most zstd functions.\n///\n/// Either a success code (usually number of bytes written), or an error code.\npub type SafeResult = Result<usize, ErrorCode>;\n\n/// Indicates an error happened when parsing the frame content size.\n///\n/// The stream may be corrupted, or the given frame prefix was too small.\n#[derive(Debug)]\npub struct ContentSizeError;\n\nimpl core::fmt::Display for ContentSizeError {\n    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {\n        f.write_str(\"Could not get content size\")\n    }\n}\n\n/// Returns true if code represents error.\nfn is_error(code: usize) -> bool {\n    // Safety: Just FFI\n    unsafe { zstd_sys::ZSTD_isError(code) != 0 }\n}\n\n/// Parse the result code\n///\n/// Returns the number of bytes written if the code represents success,\n/// or the error message code otherwise.\nfn parse_code(code: usize) -> SafeResult {\n    if !is_error(code) {\n        Ok(code)\n    } else {\n        Err(code)\n    }\n}\n\n/// Parse a content size value.\n///\n/// zstd uses 2 special content size values to indicate either unknown size or parsing error.\nfn parse_content_size(\n    content_size: u64,\n) -> Result<Option<u64>, ContentSizeError> {\n    match content_size {\n        CONTENTSIZE_ERROR => Err(ContentSizeError),\n        CONTENTSIZE_UNKNOWN => Ok(None),\n        other => Ok(Some(other)),\n    }\n}\n\nfn ptr_void(src: &[u8]) -> *const c_void {\n    src.as_ptr() as *const c_void\n}\n\nfn ptr_mut_void(dst: &mut (impl WriteBuf + ?Sized)) -> *mut c_void {\n    dst.as_mut_ptr() as *mut c_void\n}\n\n/// Returns the ZSTD version.\n///\n/// Returns `major * 10_000 + minor * 100 + patch`.\n/// So 1.5.3 would be returned as `10_503`.\npub fn version_number() -> u32 {\n    // Safety: Just FFI\n    unsafe { zstd_sys::ZSTD_versionNumber() as u32 }\n}\n\n/// Returns a string representation of the ZSTD version.\n///\n/// For example \"1.5.3\".\npub fn version_string() -> 
&'static str {\n    // Safety: Assumes `ZSTD_versionString` returns a valid utf8 string.\n    unsafe { c_char_to_str(zstd_sys::ZSTD_versionString()) }\n}\n\n/// Returns the minimum (fastest) compression level supported.\n///\n/// This is likely going to be a _very_ large negative number.\npub fn min_c_level() -> CompressionLevel {\n    // Safety: Just FFI\n    unsafe { zstd_sys::ZSTD_minCLevel() as CompressionLevel }\n}\n\n/// Returns the maximum (slowest) compression level supported.\npub fn max_c_level() -> CompressionLevel {\n    // Safety: Just FFI\n    unsafe { zstd_sys::ZSTD_maxCLevel() as CompressionLevel }\n}\n\n/// Wraps the `ZSTD_compress` function.\n///\n/// This will try to compress `src` entirely and write the result to `dst`, returning the number of\n/// bytes written. If `dst` is too small to hold the compressed content, an error will be returned.\n///\n/// For streaming operations that don't require to store the entire input/output in memory, see\n/// `compress_stream`.\npub fn compress<C: WriteBuf + ?Sized>(\n    dst: &mut C,\n    src: &[u8],\n    compression_level: CompressionLevel,\n) -> SafeResult {\n    // Safety: ZSTD_compress indeed returns how many bytes have been written.\n    unsafe {\n        dst.write_from(|buffer, capacity| {\n            parse_code(zstd_sys::ZSTD_compress(\n                buffer,\n                capacity,\n                ptr_void(src),\n                src.len(),\n                compression_level,\n            ))\n        })\n    }\n}\n\n/// Wraps the `ZSTD_decompress` function.\n///\n/// This is a one-step decompression (not streaming).\n///\n/// You will need to make sure `dst` is large enough to store all the decompressed content, or an\n/// error will be returned.\n///\n/// If decompression was a success, the number of bytes written will be returned.\npub fn decompress<C: WriteBuf + ?Sized>(\n    dst: &mut C,\n    src: &[u8],\n) -> SafeResult {\n    // Safety: ZSTD_decompress indeed returns how many bytes have 
been written.\n    unsafe {\n        dst.write_from(|buffer, capacity| {\n            parse_code(zstd_sys::ZSTD_decompress(\n                buffer,\n                capacity,\n                ptr_void(src),\n                src.len(),\n            ))\n        })\n    }\n}\n\n/// Wraps the `ZSTD_getDecompressedSize` function.\n///\n/// Returns `None` if the size could not be found, or if the content is actually empty.\n#[deprecated(note = \"Use ZSTD_getFrameContentSize instead\")]\npub fn get_decompressed_size(src: &[u8]) -> Option<NonZeroU64> {\n    // Safety: Just FFI\n    NonZeroU64::new(unsafe {\n        zstd_sys::ZSTD_getDecompressedSize(ptr_void(src), src.len()) as u64\n    })\n}\n\n/// Maximum compressed size in worst case single-pass scenario\npub fn compress_bound(src_size: usize) -> usize {\n    // Safety: Just FFI\n    unsafe { zstd_sys::ZSTD_compressBound(src_size) }\n}\n\n/// Compression context\n///\n/// It is recommended to allocate a single context per thread and re-use it\n/// for many compression operations.\npub struct CCtx<'a>(NonNull<zstd_sys::ZSTD_CCtx>, PhantomData<&'a ()>);\n\nimpl Default for CCtx<'_> {\n    fn default() -> Self {\n        CCtx::create()\n    }\n}\n\nimpl<'a> CCtx<'a> {\n    /// Tries to create a new context.\n    ///\n    /// Returns `None` if zstd returns a NULL pointer - may happen if allocation fails.\n    pub fn try_create() -> Option<Self> {\n        // Safety: Just FFI\n        Some(CCtx(\n            NonNull::new(unsafe { zstd_sys::ZSTD_createCCtx() })?,\n            PhantomData,\n        ))\n    }\n\n    /// Wrap `ZSTD_createCCtx`\n    ///\n    /// # Panics\n    ///\n    /// If zstd returns a NULL pointer.\n    pub fn create() -> Self {\n        Self::try_create()\n            .expect(\"zstd returned null pointer when creating new context\")\n    }\n\n    /// Wraps the `ZSTD_compressCCtx()` function\n    pub fn compress<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        src: &[u8],\n        
compression_level: CompressionLevel,\n    ) -> SafeResult {\n        // Safety: ZSTD_compressCCtx returns how many bytes were written.\n        unsafe {\n            dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_compressCCtx(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    ptr_void(src),\n                    src.len(),\n                    compression_level,\n                ))\n            })\n        }\n    }\n\n    /// Wraps the `ZSTD_compress2()` function.\n    pub fn compress2<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        src: &[u8],\n    ) -> SafeResult {\n        // Safety: ZSTD_compress2 returns how many bytes were written.\n        unsafe {\n            dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_compress2(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    ptr_void(src),\n                    src.len(),\n                ))\n            })\n        }\n    }\n\n    /// Wraps the `ZSTD_compress_usingDict()` function.\n    pub fn compress_using_dict<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        src: &[u8],\n        dict: &[u8],\n        compression_level: CompressionLevel,\n    ) -> SafeResult {\n        // Safety: ZSTD_compress_usingDict returns how many bytes were written.\n        unsafe {\n            dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_compress_usingDict(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    ptr_void(src),\n                    src.len(),\n                    ptr_void(dict),\n                    dict.len(),\n                    compression_level,\n                ))\n            })\n        }\n    }\n\n    /// Wraps the `ZSTD_compress_usingCDict()` function.\n    pub fn 
compress_using_cdict<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        src: &[u8],\n        cdict: &CDict<'_>,\n    ) -> SafeResult {\n        // Safety: ZSTD_compress_usingCDict returns how many bytes were written.\n        unsafe {\n            dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_compress_usingCDict(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    ptr_void(src),\n                    src.len(),\n                    cdict.0.as_ptr(),\n                ))\n            })\n        }\n    }\n\n    /// Initializes the context with the given compression level.\n    ///\n    /// This is equivalent to running:\n    /// * `reset()`\n    /// * `set_parameter(CompressionLevel, compression_level)`\n    pub fn init(&mut self, compression_level: CompressionLevel) -> SafeResult {\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_initCStream(self.0.as_ptr(), compression_level)\n        };\n        parse_code(code)\n    }\n\n    /// Wraps the `ZSTD_initCStream_srcSize()` function.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    #[deprecated]\n    pub fn init_src_size(\n        &mut self,\n        compression_level: CompressionLevel,\n        pledged_src_size: u64,\n    ) -> SafeResult {\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_initCStream_srcSize(\n                self.0.as_ptr(),\n                compression_level as c_int,\n                pledged_src_size as c_ulonglong,\n            )\n        };\n        parse_code(code)\n    }\n\n    /// Wraps the `ZSTD_initCStream_usingDict()` function.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    #[deprecated]\n    pub fn init_using_dict(\n        &mut self,\n        dict: 
&[u8],\n        compression_level: CompressionLevel,\n    ) -> SafeResult {\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_initCStream_usingDict(\n                self.0.as_ptr(),\n                ptr_void(dict),\n                dict.len(),\n                compression_level,\n            )\n        };\n        parse_code(code)\n    }\n\n    /// Wraps the `ZSTD_initCStream_usingCDict()` function.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    #[deprecated]\n    pub fn init_using_cdict<'b>(&mut self, cdict: &CDict<'b>) -> SafeResult\n    where\n        'b: 'a, // Dictionary outlives the stream.\n    {\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_initCStream_usingCDict(\n                self.0.as_ptr(),\n                cdict.0.as_ptr(),\n            )\n        };\n        parse_code(code)\n    }\n\n    /// Tries to load a dictionary.\n    ///\n    /// The dictionary content will be copied internally and does not need to be kept alive after\n    /// calling this function.\n    ///\n    /// If you need to use the same dictionary for multiple contexts, it may be more efficient to\n    /// create a `CDict` first, then loads that.\n    ///\n    /// The dictionary will apply to all compressed frames, until a new dictionary is set.\n    pub fn load_dictionary(&mut self, dict: &[u8]) -> SafeResult {\n        // Safety: Just FFI\n        parse_code(unsafe {\n            zstd_sys::ZSTD_CCtx_loadDictionary(\n                self.0.as_ptr(),\n                ptr_void(dict),\n                dict.len(),\n            )\n        })\n    }\n\n    /// Wraps the `ZSTD_CCtx_refCDict()` function.\n    ///\n    /// Dictionary must outlive the context.\n    pub fn ref_cdict<'b>(&mut self, cdict: &CDict<'b>) -> SafeResult\n    where\n        'b: 'a,\n    {\n        // Safety: Just FFI\n        parse_code(unsafe {\n            
zstd_sys::ZSTD_CCtx_refCDict(self.0.as_ptr(), cdict.0.as_ptr())\n        })\n    }\n\n    /// Return to \"no-dictionary\" mode.\n    ///\n    /// This will disable any dictionary/prefix previously registered for future frames.\n    pub fn disable_dictionary(&mut self) -> SafeResult {\n        // Safety: Just FFI\n        parse_code(unsafe {\n            zstd_sys::ZSTD_CCtx_loadDictionary(\n                self.0.as_ptr(),\n                core::ptr::null(),\n                0,\n            )\n        })\n    }\n\n    /// Use some prefix as single-use dictionary for the next compressed frame.\n    ///\n    /// Just like a dictionary, decompression will need to be given the same prefix.\n    ///\n    /// This is best used if the \"prefix\" looks like the data to be compressed.\n    pub fn ref_prefix<'b>(&mut self, prefix: &'b [u8]) -> SafeResult\n    where\n        'b: 'a,\n    {\n        // Safety: Just FFI\n        parse_code(unsafe {\n            zstd_sys::ZSTD_CCtx_refPrefix(\n                self.0.as_ptr(),\n                ptr_void(prefix),\n                prefix.len(),\n            )\n        })\n    }\n\n    /// Performs a step of a streaming compression operation.\n    ///\n    /// This will read some data from `input` and/or write some data to `output`.\n    ///\n    /// # Returns\n    ///\n    /// A hint for the \"ideal\" amount of input data to provide in the next call.\n    ///\n    /// This hint is only for performance purposes.\n    ///\n    /// Wraps the `ZSTD_compressStream()` function.\n    pub fn compress_stream<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n        input: &mut InBuffer<'_>,\n    ) -> SafeResult {\n        let mut output = output.wrap();\n        let mut input = input.wrap();\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_compressStream(\n                self.0.as_ptr(),\n                ptr_mut(&mut output),\n                ptr_mut(&mut input),\n        
    )\n        };\n        parse_code(code)\n    }\n\n    /// Performs a step of a streaming compression operation.\n    ///\n    /// This will read some data from `input` and/or write some data to `output`.\n    ///\n    /// The `end_op` directive can be used to specify what to do after: nothing special, flush\n    /// internal buffers, or end the frame.\n    ///\n    /// # Returns\n    ///\n    /// An lower bound for the amount of data that still needs to be flushed out.\n    ///\n    /// This is useful when flushing or ending the frame: you need to keep calling this function\n    /// until it returns 0.\n    ///\n    /// Wraps the `ZSTD_compressStream2()` function.\n    pub fn compress_stream2<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n        input: &mut InBuffer<'_>,\n        end_op: zstd_sys::ZSTD_EndDirective,\n    ) -> SafeResult {\n        let mut output = output.wrap();\n        let mut input = input.wrap();\n        // Safety: Just FFI\n        parse_code(unsafe {\n            zstd_sys::ZSTD_compressStream2(\n                self.0.as_ptr(),\n                ptr_mut(&mut output),\n                ptr_mut(&mut input),\n                end_op,\n            )\n        })\n    }\n\n    /// Flush any intermediate buffer.\n    ///\n    /// To fully flush, you should keep calling this function until it returns `Ok(0)`.\n    ///\n    /// Wraps the `ZSTD_flushStream()` function.\n    pub fn flush_stream<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n    ) -> SafeResult {\n        let mut output = output.wrap();\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_flushStream(self.0.as_ptr(), ptr_mut(&mut output))\n        };\n        parse_code(code)\n    }\n\n    /// Ends the stream.\n    ///\n    /// You should keep calling this function until it returns `Ok(0)`.\n    ///\n    /// Wraps the `ZSTD_endStream()` function.\n    pub fn end_stream<C: 
WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n    ) -> SafeResult {\n        let mut output = output.wrap();\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_endStream(self.0.as_ptr(), ptr_mut(&mut output))\n        };\n        parse_code(code)\n    }\n\n    /// Returns the size currently used by this context.\n    ///\n    /// This may change over time.\n    pub fn sizeof(&self) -> usize {\n        // Safety: Just FFI\n        unsafe { zstd_sys::ZSTD_sizeof_CCtx(self.0.as_ptr()) }\n    }\n\n    /// Resets the state of the context.\n    ///\n    /// Depending on the reset mode, it can reset the session, the parameters, or both.\n    ///\n    /// Wraps the `ZSTD_CCtx_reset()` function.\n    pub fn reset(&mut self, reset: ResetDirective) -> SafeResult {\n        // Safety: Just FFI\n        parse_code(unsafe {\n            zstd_sys::ZSTD_CCtx_reset(self.0.as_ptr(), reset.as_sys())\n        })\n    }\n\n    /// Sets a compression parameter.\n    ///\n    /// Some of these parameters need to be set during de-compression as well.\n    pub fn set_parameter(&mut self, param: CParameter) -> SafeResult {\n        // TODO: Until bindgen properly generates a binding for this, we'll need to do it here.\n\n        #[cfg(feature = \"experimental\")]\n        use zstd_sys::ZSTD_cParameter::{\n            ZSTD_c_experimentalParam1 as ZSTD_c_rsyncable,\n            ZSTD_c_experimentalParam10 as ZSTD_c_stableOutBuffer,\n            ZSTD_c_experimentalParam11 as ZSTD_c_blockDelimiters,\n            ZSTD_c_experimentalParam12 as ZSTD_c_validateSequences,\n            ZSTD_c_experimentalParam13 as ZSTD_c_useBlockSplitter,\n            ZSTD_c_experimentalParam14 as ZSTD_c_useRowMatchFinder,\n            ZSTD_c_experimentalParam15 as ZSTD_c_deterministicRefPrefix,\n            ZSTD_c_experimentalParam16 as ZSTD_c_prefetchCDictTables,\n            ZSTD_c_experimentalParam17 as ZSTD_c_enableSeqProducerFallback,\n       
     ZSTD_c_experimentalParam18 as ZSTD_c_maxBlockSize,\n            ZSTD_c_experimentalParam19 as ZSTD_c_searchForExternalRepcodes,\n            ZSTD_c_experimentalParam2 as ZSTD_c_format,\n            ZSTD_c_experimentalParam3 as ZSTD_c_forceMaxWindow,\n            ZSTD_c_experimentalParam4 as ZSTD_c_forceAttachDict,\n            ZSTD_c_experimentalParam5 as ZSTD_c_literalCompressionMode,\n            ZSTD_c_experimentalParam7 as ZSTD_c_srcSizeHint,\n            ZSTD_c_experimentalParam8 as ZSTD_c_enableDedicatedDictSearch,\n            ZSTD_c_experimentalParam9 as ZSTD_c_stableInBuffer,\n        };\n\n        use zstd_sys::ZSTD_cParameter::*;\n        use CParameter::*;\n\n        let (param, value) = match param {\n            #[cfg(feature = \"experimental\")]\n            RSyncable(rsyncable) => (ZSTD_c_rsyncable, rsyncable as c_int),\n            #[cfg(feature = \"experimental\")]\n            Format(format) => (ZSTD_c_format, format as c_int),\n            #[cfg(feature = \"experimental\")]\n            ForceMaxWindow(force) => (ZSTD_c_forceMaxWindow, force as c_int),\n            #[cfg(feature = \"experimental\")]\n            ForceAttachDict(force) => (ZSTD_c_forceAttachDict, force as c_int),\n            #[cfg(feature = \"experimental\")]\n            LiteralCompressionMode(mode) => {\n                (ZSTD_c_literalCompressionMode, mode as c_int)\n            }\n            #[cfg(feature = \"experimental\")]\n            SrcSizeHint(value) => (ZSTD_c_srcSizeHint, value as c_int),\n            #[cfg(feature = \"experimental\")]\n            EnableDedicatedDictSearch(enable) => {\n                (ZSTD_c_enableDedicatedDictSearch, enable as c_int)\n            }\n            #[cfg(feature = \"experimental\")]\n            StableInBuffer(stable) => (ZSTD_c_stableInBuffer, stable as c_int),\n            #[cfg(feature = \"experimental\")]\n            StableOutBuffer(stable) => {\n                (ZSTD_c_stableOutBuffer, stable as c_int)\n            }\n     
       #[cfg(feature = \"experimental\")]\n            BlockDelimiters(value) => (ZSTD_c_blockDelimiters, value as c_int),\n            #[cfg(feature = \"experimental\")]\n            ValidateSequences(validate) => {\n                (ZSTD_c_validateSequences, validate as c_int)\n            }\n            #[cfg(feature = \"experimental\")]\n            UseBlockSplitter(split) => {\n                (ZSTD_c_useBlockSplitter, split as c_int)\n            }\n            #[cfg(feature = \"experimental\")]\n            UseRowMatchFinder(mode) => {\n                (ZSTD_c_useRowMatchFinder, mode as c_int)\n            }\n            #[cfg(feature = \"experimental\")]\n            DeterministicRefPrefix(deterministic) => {\n                (ZSTD_c_deterministicRefPrefix, deterministic as c_int)\n            }\n            #[cfg(feature = \"experimental\")]\n            PrefetchCDictTables(prefetch) => {\n                (ZSTD_c_prefetchCDictTables, prefetch as c_int)\n            }\n            #[cfg(feature = \"experimental\")]\n            EnableSeqProducerFallback(enable) => {\n                (ZSTD_c_enableSeqProducerFallback, enable as c_int)\n            }\n            #[cfg(feature = \"experimental\")]\n            MaxBlockSize(value) => (ZSTD_c_maxBlockSize, value as c_int),\n            #[cfg(feature = \"experimental\")]\n            SearchForExternalRepcodes(value) => {\n                (ZSTD_c_searchForExternalRepcodes, value as c_int)\n            }\n            TargetCBlockSize(value) => {\n                (ZSTD_c_targetCBlockSize, value as c_int)\n            }\n            CompressionLevel(level) => (ZSTD_c_compressionLevel, level),\n            WindowLog(value) => (ZSTD_c_windowLog, value as c_int),\n            HashLog(value) => (ZSTD_c_hashLog, value as c_int),\n            ChainLog(value) => (ZSTD_c_chainLog, value as c_int),\n            SearchLog(value) => (ZSTD_c_searchLog, value as c_int),\n            MinMatch(value) => (ZSTD_c_minMatch, value as 
c_int),\n            TargetLength(value) => (ZSTD_c_targetLength, value as c_int),\n            Strategy(strategy) => (ZSTD_c_strategy, strategy as c_int),\n            EnableLongDistanceMatching(flag) => {\n                (ZSTD_c_enableLongDistanceMatching, flag as c_int)\n            }\n            LdmHashLog(value) => (ZSTD_c_ldmHashLog, value as c_int),\n            LdmMinMatch(value) => (ZSTD_c_ldmMinMatch, value as c_int),\n            LdmBucketSizeLog(value) => {\n                (ZSTD_c_ldmBucketSizeLog, value as c_int)\n            }\n            LdmHashRateLog(value) => (ZSTD_c_ldmHashRateLog, value as c_int),\n            ContentSizeFlag(flag) => (ZSTD_c_contentSizeFlag, flag as c_int),\n            ChecksumFlag(flag) => (ZSTD_c_checksumFlag, flag as c_int),\n            DictIdFlag(flag) => (ZSTD_c_dictIDFlag, flag as c_int),\n\n            NbWorkers(value) => (ZSTD_c_nbWorkers, value as c_int),\n\n            JobSize(value) => (ZSTD_c_jobSize, value as c_int),\n\n            OverlapSizeLog(value) => (ZSTD_c_overlapLog, value as c_int),\n        };\n\n        // Safety: Just FFI\n        parse_code(unsafe {\n            zstd_sys::ZSTD_CCtx_setParameter(self.0.as_ptr(), param, value)\n        })\n    }\n\n    /// Guarantee that the input size will be this value.\n    ///\n    /// If given `None`, assumes the size is unknown.\n    ///\n    /// Unless explicitly disabled, this will cause the size to be written in the compressed frame\n    /// header.\n    ///\n    /// If the actual data given to compress has a different size, an error will be returned.\n    pub fn set_pledged_src_size(\n        &mut self,\n        pledged_src_size: Option<u64>,\n    ) -> SafeResult {\n        // Safety: Just FFI\n        parse_code(unsafe {\n            zstd_sys::ZSTD_CCtx_setPledgedSrcSize(\n                self.0.as_ptr(),\n                pledged_src_size.unwrap_or(CONTENTSIZE_UNKNOWN) as c_ulonglong,\n            )\n        })\n    }\n\n    /// Creates a copy of this 
context.\n    ///\n    /// This only works before any data has been compressed. An error will be\n    /// returned otherwise.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    pub fn try_clone(\n        &self,\n        pledged_src_size: Option<u64>,\n    ) -> Result<Self, ErrorCode> {\n        // Safety: Just FFI\n        let context = NonNull::new(unsafe { zstd_sys::ZSTD_createCCtx() })\n            .ok_or(0usize)?;\n\n        // Safety: Just FFI\n        parse_code(unsafe {\n            zstd_sys::ZSTD_copyCCtx(\n                context.as_ptr(),\n                self.0.as_ptr(),\n                pledged_src_size.unwrap_or(CONTENTSIZE_UNKNOWN),\n            )\n        })?;\n\n        Ok(CCtx(context, self.1))\n    }\n\n    /// Wraps the `ZSTD_getBlockSize()` function.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    pub fn get_block_size(&self) -> usize {\n        // Safety: Just FFI\n        unsafe { zstd_sys::ZSTD_getBlockSize(self.0.as_ptr()) }\n    }\n\n    /// Wraps the `ZSTD_compressBlock()` function.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    pub fn compress_block<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        src: &[u8],\n    ) -> SafeResult {\n        // Safety: ZSTD_compressBlock returns the number of bytes written.\n        unsafe {\n            dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_compressBlock(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    ptr_void(src),\n                    src.len(),\n                ))\n            })\n        }\n    }\n\n    /// Returns the recommended input buffer size.\n    ///\n    /// Using this size may result in minor performance boost.\n    pub fn in_size() -> 
usize {\n        // Safety: Just FFI\n        unsafe { zstd_sys::ZSTD_CStreamInSize() }\n    }\n\n    /// Returns the recommended output buffer size.\n    ///\n    /// Using this may result in minor performance boost.\n    pub fn out_size() -> usize {\n        // Safety: Just FFI\n        unsafe { zstd_sys::ZSTD_CStreamOutSize() }\n    }\n\n    /// Use a shared thread pool for this context.\n    ///\n    /// Thread pool must outlive the context.\n    #[cfg(all(feature = \"experimental\", feature = \"zstdmt\"))]\n    #[cfg_attr(\n        feature = \"doc-cfg\",\n        doc(cfg(all(feature = \"experimental\", feature = \"zstdmt\")))\n    )]\n    pub fn ref_thread_pool<'b>(&mut self, pool: &'b ThreadPool) -> SafeResult\n    where\n        'b: 'a,\n    {\n        parse_code(unsafe {\n            zstd_sys::ZSTD_CCtx_refThreadPool(self.0.as_ptr(), pool.0.as_ptr())\n        })\n    }\n\n    /// Return to using a private thread pool for this context.\n    #[cfg(all(feature = \"experimental\", feature = \"zstdmt\"))]\n    #[cfg_attr(\n        feature = \"doc-cfg\",\n        doc(cfg(all(feature = \"experimental\", feature = \"zstdmt\")))\n    )]\n    pub fn disable_thread_pool(&mut self) -> SafeResult {\n        parse_code(unsafe {\n            zstd_sys::ZSTD_CCtx_refThreadPool(\n                self.0.as_ptr(),\n                core::ptr::null_mut(),\n            )\n        })\n    }\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    pub fn get_frame_progression(&self) -> FrameProgression {\n        // Safety: Just FFI\n        unsafe { zstd_sys::ZSTD_getFrameProgression(self.0.as_ptr()) }\n    }\n}\n\nimpl<'a> Drop for CCtx<'a> {\n    fn drop(&mut self) {\n        // Safety: Just FFI\n        unsafe {\n            zstd_sys::ZSTD_freeCCtx(self.0.as_ptr());\n        }\n    }\n}\n\nunsafe impl Send for CCtx<'_> {}\n// Non thread-safe methods already take `&mut self`, so it's fine to implement Sync 
here.\nunsafe impl Sync for CCtx<'_> {}\n\nunsafe fn c_char_to_str(text: *const c_char) -> &'static str {\n    core::ffi::CStr::from_ptr(text)\n        .to_str()\n        .expect(\"bad error message from zstd\")\n}\n\n/// Returns the error string associated with an error code.\npub fn get_error_name(code: usize) -> &'static str {\n    unsafe {\n        // Safety: assumes ZSTD returns a well-formed utf8 string.\n        let name = zstd_sys::ZSTD_getErrorName(code);\n        c_char_to_str(name)\n    }\n}\n\n/// A Decompression Context.\n///\n/// The lifetime references the potential dictionary used for this context.\n///\n/// If no dictionary was used, it will most likely be `'static`.\n///\n/// Same as `DStream`.\npub struct DCtx<'a>(NonNull<zstd_sys::ZSTD_DCtx>, PhantomData<&'a ()>);\n\nimpl Default for DCtx<'_> {\n    fn default() -> Self {\n        DCtx::create()\n    }\n}\n\nimpl<'a> DCtx<'a> {\n    /// Try to create a new decompression context.\n    ///\n    /// Returns `None` if the operation failed (for example, not enough memory).\n    pub fn try_create() -> Option<Self> {\n        Some(DCtx(\n            NonNull::new(unsafe { zstd_sys::ZSTD_createDCtx() })?,\n            PhantomData,\n        ))\n    }\n\n    /// Creates a new decoding context.\n    ///\n    /// # Panics\n    ///\n    /// If the context creation fails.\n    pub fn create() -> Self {\n        Self::try_create()\n            .expect(\"zstd returned null pointer when creating new context\")\n    }\n\n    /// Fully decompress the given frame.\n    ///\n    /// This decompress an entire frame in-memory. 
If you can have enough memory to store both the\n    /// input and output buffer, then it may be faster than streaming decompression.\n    ///\n    /// Wraps the `ZSTD_decompressDCtx()` function.\n    pub fn decompress<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        src: &[u8],\n    ) -> SafeResult {\n        unsafe {\n            dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_decompressDCtx(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    ptr_void(src),\n                    src.len(),\n                ))\n            })\n        }\n    }\n\n    /// Fully decompress the given frame using a dictionary.\n    ///\n    /// Dictionary must be identical to the one used during compression.\n    ///\n    /// If you plan on using the same dictionary multiple times, it is faster to create a `DDict`\n    /// first and use `decompress_using_ddict`.\n    ///\n    /// Wraps `ZSTD_decompress_usingDict`\n    pub fn decompress_using_dict<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        src: &[u8],\n        dict: &[u8],\n    ) -> SafeResult {\n        unsafe {\n            dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_decompress_usingDict(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    ptr_void(src),\n                    src.len(),\n                    ptr_void(dict),\n                    dict.len(),\n                ))\n            })\n        }\n    }\n\n    /// Fully decompress the given frame using a dictionary.\n    ///\n    /// Dictionary must be identical to the one used during compression.\n    ///\n    /// Wraps the `ZSTD_decompress_usingDDict()` function.\n    pub fn decompress_using_ddict<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        src: &[u8],\n        ddict: &DDict<'_>,\n    ) -> 
SafeResult {\n        unsafe {\n            dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_decompress_usingDDict(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    ptr_void(src),\n                    src.len(),\n                    ddict.0.as_ptr(),\n                ))\n            })\n        }\n    }\n\n    /// Initializes an existing `DStream` for decompression.\n    ///\n    /// This is equivalent to calling:\n    /// * `reset(SessionOnly)`\n    /// * `disable_dictionary()`\n    ///\n    /// Wraps the `ZSTD_initDStream()` function.\n    pub fn init(&mut self) -> SafeResult {\n        let code = unsafe { zstd_sys::ZSTD_initDStream(self.0.as_ptr()) };\n        parse_code(code)\n    }\n\n    /// Wraps the `ZSTD_initDStream_usingDict()` function.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    #[deprecated]\n    pub fn init_using_dict(&mut self, dict: &[u8]) -> SafeResult {\n        let code = unsafe {\n            zstd_sys::ZSTD_initDStream_usingDict(\n                self.0.as_ptr(),\n                ptr_void(dict),\n                dict.len(),\n            )\n        };\n        parse_code(code)\n    }\n\n    /// Wraps the `ZSTD_initDStream_usingDDict()` function.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    #[deprecated]\n    pub fn init_using_ddict<'b>(&mut self, ddict: &DDict<'b>) -> SafeResult\n    where\n        'b: 'a,\n    {\n        let code = unsafe {\n            zstd_sys::ZSTD_initDStream_usingDDict(\n                self.0.as_ptr(),\n                ddict.0.as_ptr(),\n            )\n        };\n        parse_code(code)\n    }\n\n    /// Resets the state of the context.\n    ///\n    /// Depending on the reset mode, it can reset the session, the parameters, or both.\n    ///\n    /// Wraps the 
`ZSTD_DCtx_reset()` function.\n    pub fn reset(&mut self, reset: ResetDirective) -> SafeResult {\n        parse_code(unsafe {\n            zstd_sys::ZSTD_DCtx_reset(self.0.as_ptr(), reset.as_sys())\n        })\n    }\n\n    /// Loads a dictionary.\n    ///\n    /// This will let this context decompress frames that were compressed using this dictionary.\n    ///\n    /// The dictionary content will be copied internally and does not need to be kept alive after\n    /// calling this function.\n    ///\n    /// If you need to use the same dictionary for multiple contexts, it may be more efficient to\n    /// create a `DDict` first, then loads that.\n    ///\n    /// The dictionary will apply to all future frames, until a new dictionary is set.\n    pub fn load_dictionary(&mut self, dict: &[u8]) -> SafeResult {\n        parse_code(unsafe {\n            zstd_sys::ZSTD_DCtx_loadDictionary(\n                self.0.as_ptr(),\n                ptr_void(dict),\n                dict.len(),\n            )\n        })\n    }\n\n    /// Return to \"no-dictionary\" mode.\n    ///\n    /// This will disable any dictionary/prefix previously registered for future frames.\n    pub fn disable_dictionary(&mut self) -> SafeResult {\n        parse_code(unsafe {\n            zstd_sys::ZSTD_DCtx_loadDictionary(\n                self.0.as_ptr(),\n                core::ptr::null(),\n                0,\n            )\n        })\n    }\n\n    /// References a dictionary.\n    ///\n    /// This will let this context decompress frames compressed with the same dictionary.\n    ///\n    /// It will apply to all frames decompressed by this context (until a new dictionary is set).\n    ///\n    /// Wraps the `ZSTD_DCtx_refDDict()` function.\n    pub fn ref_ddict<'b>(&mut self, ddict: &DDict<'b>) -> SafeResult\n    where\n        'b: 'a,\n    {\n        parse_code(unsafe {\n            zstd_sys::ZSTD_DCtx_refDDict(self.0.as_ptr(), ddict.0.as_ptr())\n        })\n    }\n\n    /// Use some prefix as 
single-use dictionary for the next frame.\n    ///\n    /// Just like a dictionary, this only works if compression was done with the same prefix.\n    ///\n    /// But unlike a dictionary, this only applies to the next frame.\n    ///\n    /// Wraps the `ZSTD_DCtx_refPrefix()` function.\n    pub fn ref_prefix<'b>(&mut self, prefix: &'b [u8]) -> SafeResult\n    where\n        'b: 'a,\n    {\n        parse_code(unsafe {\n            zstd_sys::ZSTD_DCtx_refPrefix(\n                self.0.as_ptr(),\n                ptr_void(prefix),\n                prefix.len(),\n            )\n        })\n    }\n\n    /// Sets a decompression parameter.\n    pub fn set_parameter(&mut self, param: DParameter) -> SafeResult {\n        #[cfg(feature = \"experimental\")]\n        use zstd_sys::ZSTD_dParameter::{\n            ZSTD_d_experimentalParam1 as ZSTD_d_format,\n            ZSTD_d_experimentalParam2 as ZSTD_d_stableOutBuffer,\n            ZSTD_d_experimentalParam3 as ZSTD_d_forceIgnoreChecksum,\n            ZSTD_d_experimentalParam4 as ZSTD_d_refMultipleDDicts,\n        };\n\n        use zstd_sys::ZSTD_dParameter::*;\n        use DParameter::*;\n\n        let (param, value) = match param {\n            #[cfg(feature = \"experimental\")]\n            Format(format) => (ZSTD_d_format, format as c_int),\n            #[cfg(feature = \"experimental\")]\n            StableOutBuffer(stable) => {\n                (ZSTD_d_stableOutBuffer, stable as c_int)\n            }\n            #[cfg(feature = \"experimental\")]\n            ForceIgnoreChecksum(force) => {\n                (ZSTD_d_forceIgnoreChecksum, force as c_int)\n            }\n            #[cfg(feature = \"experimental\")]\n            RefMultipleDDicts(value) => {\n                (ZSTD_d_refMultipleDDicts, value as c_int)\n            }\n\n            WindowLogMax(value) => (ZSTD_d_windowLogMax, value as c_int),\n        };\n\n        parse_code(unsafe {\n            zstd_sys::ZSTD_DCtx_setParameter(self.0.as_ptr(), param, 
value)\n        })\n    }\n\n    /// Performs a step of a streaming decompression operation.\n    ///\n    /// This will read some data from `input` and/or write some data to `output`.\n    ///\n    /// # Returns\n    ///\n    /// * `Ok(0)` if the current frame just finished decompressing successfully.\n    /// * `Ok(hint)` with a hint for the \"ideal\" amount of input data to provide in the next call.\n    ///     Can be safely ignored.\n    ///\n    /// Wraps the `ZSTD_decompressStream()` function.\n    pub fn decompress_stream<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n        input: &mut InBuffer<'_>,\n    ) -> SafeResult {\n        let mut output = output.wrap();\n        let mut input = input.wrap();\n        let code = unsafe {\n            zstd_sys::ZSTD_decompressStream(\n                self.0.as_ptr(),\n                ptr_mut(&mut output),\n                ptr_mut(&mut input),\n            )\n        };\n        parse_code(code)\n    }\n\n    /// Wraps the `ZSTD_DStreamInSize()` function.\n    ///\n    /// Returns a hint for the recommended size of the input buffer for decompression.\n    pub fn in_size() -> usize {\n        unsafe { zstd_sys::ZSTD_DStreamInSize() }\n    }\n\n    /// Wraps the `ZSTD_DStreamOutSize()` function.\n    ///\n    /// Returns a hint for the recommended size of the output buffer for decompression.\n    pub fn out_size() -> usize {\n        unsafe { zstd_sys::ZSTD_DStreamOutSize() }\n    }\n\n    /// Wraps the `ZSTD_sizeof_DCtx()` function.\n    pub fn sizeof(&self) -> usize {\n        unsafe { zstd_sys::ZSTD_sizeof_DCtx(self.0.as_ptr()) }\n    }\n\n    /// Wraps the `ZSTD_decompressBlock()` function.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    pub fn decompress_block<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        src: &[u8],\n    ) -> SafeResult {\n        unsafe {\n            
dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_decompressBlock(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    ptr_void(src),\n                    src.len(),\n                ))\n            })\n        }\n    }\n\n    /// Wraps the `ZSTD_insertBlock()` function.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    pub fn insert_block(&mut self, block: &[u8]) -> usize {\n        unsafe {\n            zstd_sys::ZSTD_insertBlock(\n                self.0.as_ptr(),\n                ptr_void(block),\n                block.len(),\n            )\n        }\n    }\n\n    /// Creates a copy of this context.\n    ///\n    /// This only works before any data has been decompressed. An error will be\n    /// returned otherwise.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    pub fn try_clone(&self) -> Result<Self, ErrorCode> {\n        let context = NonNull::new(unsafe { zstd_sys::ZSTD_createDCtx() })\n            .ok_or(0usize)?;\n\n        unsafe { zstd_sys::ZSTD_copyDCtx(context.as_ptr(), self.0.as_ptr()) };\n\n        Ok(DCtx(context, self.1))\n    }\n}\n\nimpl Drop for DCtx<'_> {\n    fn drop(&mut self) {\n        unsafe {\n            zstd_sys::ZSTD_freeDCtx(self.0.as_ptr());\n        }\n    }\n}\n\nunsafe impl Send for DCtx<'_> {}\n// Non thread-safe methods already take `&mut self`, so it's fine to implement Sync here.\nunsafe impl Sync for DCtx<'_> {}\n\n/// Compression dictionary.\npub struct CDict<'a>(NonNull<zstd_sys::ZSTD_CDict>, PhantomData<&'a ()>);\n\nimpl CDict<'static> {\n    /// Prepare a dictionary to compress data.\n    ///\n    /// This will make it easier for compression contexts to load this dictionary.\n    ///\n    /// The dictionary content will be copied internally, and does not need to be kept around.\n   
 ///\n    /// # Panics\n    ///\n    /// If loading this dictionary failed.\n    pub fn create(\n        dict_buffer: &[u8],\n        compression_level: CompressionLevel,\n    ) -> Self {\n        Self::try_create(dict_buffer, compression_level)\n            .expect(\"zstd returned null pointer when creating dict\")\n    }\n\n    /// Prepare a dictionary to compress data.\n    ///\n    /// This will make it easier for compression contexts to load this dictionary.\n    ///\n    /// The dictionary content will be copied internally, and does not need to be kept around.\n    pub fn try_create(\n        dict_buffer: &[u8],\n        compression_level: CompressionLevel,\n    ) -> Option<Self> {\n        Some(CDict(\n            NonNull::new(unsafe {\n                zstd_sys::ZSTD_createCDict(\n                    ptr_void(dict_buffer),\n                    dict_buffer.len(),\n                    compression_level,\n                )\n            })?,\n            PhantomData,\n        ))\n    }\n}\n\nimpl<'a> CDict<'a> {\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    pub fn create_by_reference(\n        dict_buffer: &'a [u8],\n        compression_level: CompressionLevel,\n    ) -> Self {\n        CDict(\n            NonNull::new(unsafe {\n                zstd_sys::ZSTD_createCDict_byReference(\n                    ptr_void(dict_buffer),\n                    dict_buffer.len(),\n                    compression_level,\n                )\n            })\n            .expect(\"zstd returned null pointer\"),\n            PhantomData,\n        )\n    }\n\n    /// Returns the _current_ memory usage of this dictionary.\n    ///\n    /// Note that this may change over time.\n    pub fn sizeof(&self) -> usize {\n        unsafe { zstd_sys::ZSTD_sizeof_CDict(self.0.as_ptr()) }\n    }\n\n    /// Returns the dictionary ID for this dict.\n    ///\n    /// Returns `None` if this dictionary is empty or invalid.\n  
  pub fn get_dict_id(&self) -> Option<NonZeroU32> {\n        NonZeroU32::new(unsafe {\n            zstd_sys::ZSTD_getDictID_fromCDict(self.0.as_ptr()) as u32\n        })\n    }\n}\n\n/// Wraps the `ZSTD_createCDict()` function.\npub fn create_cdict(\n    dict_buffer: &[u8],\n    compression_level: CompressionLevel,\n) -> CDict<'static> {\n    CDict::create(dict_buffer, compression_level)\n}\n\nimpl<'a> Drop for CDict<'a> {\n    fn drop(&mut self) {\n        unsafe {\n            zstd_sys::ZSTD_freeCDict(self.0.as_ptr());\n        }\n    }\n}\n\nunsafe impl<'a> Send for CDict<'a> {}\nunsafe impl<'a> Sync for CDict<'a> {}\n\n/// Wraps the `ZSTD_compress_usingCDict()` function.\npub fn compress_using_cdict(\n    cctx: &mut CCtx<'_>,\n    dst: &mut [u8],\n    src: &[u8],\n    cdict: &CDict<'_>,\n) -> SafeResult {\n    cctx.compress_using_cdict(dst, src, cdict)\n}\n\n/// A digested decompression dictionary.\npub struct DDict<'a>(NonNull<zstd_sys::ZSTD_DDict>, PhantomData<&'a ()>);\n\nimpl DDict<'static> {\n    pub fn create(dict_buffer: &[u8]) -> Self {\n        Self::try_create(dict_buffer)\n            .expect(\"zstd returned null pointer when creating dict\")\n    }\n\n    pub fn try_create(dict_buffer: &[u8]) -> Option<Self> {\n        Some(DDict(\n            NonNull::new(unsafe {\n                zstd_sys::ZSTD_createDDict(\n                    ptr_void(dict_buffer),\n                    dict_buffer.len(),\n                )\n            })?,\n            PhantomData,\n        ))\n    }\n}\n\nimpl<'a> DDict<'a> {\n    pub fn sizeof(&self) -> usize {\n        unsafe { zstd_sys::ZSTD_sizeof_DDict(self.0.as_ptr()) }\n    }\n\n    /// Wraps the `ZSTD_createDDict_byReference()` function.\n    ///\n    /// The dictionary will keep referencing `dict_buffer`.\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    pub fn create_by_reference(dict_buffer: &'a [u8]) -> Self {\n        DDict(\n            
NonNull::new(unsafe {\n                zstd_sys::ZSTD_createDDict_byReference(\n                    ptr_void(dict_buffer),\n                    dict_buffer.len(),\n                )\n            })\n            .expect(\"zstd returned null pointer\"),\n            PhantomData,\n        )\n    }\n\n    /// Returns the dictionary ID for this dict.\n    ///\n    /// Returns `None` if this dictionary is empty or invalid.\n    pub fn get_dict_id(&self) -> Option<NonZeroU32> {\n        NonZeroU32::new(unsafe {\n            zstd_sys::ZSTD_getDictID_fromDDict(self.0.as_ptr()) as u32\n        })\n    }\n}\n\n/// Wraps the `ZSTD_createDDict()` function.\n///\n/// It copies the dictionary internally, so the resulting `DDict` is `'static`.\npub fn create_ddict(dict_buffer: &[u8]) -> DDict<'static> {\n    DDict::create(dict_buffer)\n}\n\nimpl<'a> Drop for DDict<'a> {\n    fn drop(&mut self) {\n        unsafe {\n            zstd_sys::ZSTD_freeDDict(self.0.as_ptr());\n        }\n    }\n}\n\nunsafe impl<'a> Send for DDict<'a> {}\nunsafe impl<'a> Sync for DDict<'a> {}\n\n/// A shared thread pool for one or more compression contexts\n#[cfg(all(feature = \"experimental\", feature = \"zstdmt\"))]\n#[cfg_attr(\n    feature = \"doc-cfg\",\n    doc(cfg(all(feature = \"experimental\", feature = \"zstdmt\")))\n)]\npub struct ThreadPool(NonNull<zstd_sys::ZSTD_threadPool>);\n\n#[cfg(all(feature = \"experimental\", feature = \"zstdmt\"))]\n#[cfg_attr(\n    feature = \"doc-cfg\",\n    doc(cfg(all(feature = \"experimental\", feature = \"zstdmt\")))\n)]\nimpl ThreadPool {\n    /// Create a thread pool with the specified number of threads.\n    ///\n    /// # Panics\n    ///\n    /// If creating the thread pool failed.\n    pub fn new(num_threads: usize) -> Self {\n        Self::try_new(num_threads)\n            .expect(\"zstd returned null pointer when creating thread pool\")\n    }\n\n    /// Create a thread pool with the specified number of threads.\n    pub fn try_new(num_threads: usize) -> 
Option<Self> {\n        Some(Self(NonNull::new(unsafe {\n            zstd_sys::ZSTD_createThreadPool(num_threads)\n        })?))\n    }\n}\n\n#[cfg(all(feature = \"experimental\", feature = \"zstdmt\"))]\n#[cfg_attr(\n    feature = \"doc-cfg\",\n    doc(cfg(all(feature = \"experimental\", feature = \"zstdmt\")))\n)]\nimpl Drop for ThreadPool {\n    fn drop(&mut self) {\n        unsafe {\n            zstd_sys::ZSTD_freeThreadPool(self.0.as_ptr());\n        }\n    }\n}\n\n#[cfg(all(feature = \"experimental\", feature = \"zstdmt\"))]\n#[cfg_attr(\n    feature = \"doc-cfg\",\n    doc(cfg(all(feature = \"experimental\", feature = \"zstdmt\")))\n)]\nunsafe impl Send for ThreadPool {}\n#[cfg(all(feature = \"experimental\", feature = \"zstdmt\"))]\n#[cfg_attr(\n    feature = \"doc-cfg\",\n    doc(cfg(all(feature = \"experimental\", feature = \"zstdmt\")))\n)]\nunsafe impl Sync for ThreadPool {}\n\n/// Wraps the `ZSTD_decompress_usingDDict()` function.\npub fn decompress_using_ddict(\n    dctx: &mut DCtx<'_>,\n    dst: &mut [u8],\n    src: &[u8],\n    ddict: &DDict<'_>,\n) -> SafeResult {\n    dctx.decompress_using_ddict(dst, src, ddict)\n}\n\n/// Compression stream.\n///\n/// Same as `CCtx`.\npub type CStream<'a> = CCtx<'a>;\n\n// CStream can't be shared across threads, so it does not implement Sync.\n\n/// Allocates a new `CStream`.\npub fn create_cstream<'a>() -> CStream<'a> {\n    CCtx::create()\n}\n\n/// Prepares an existing `CStream` for compression at the given level.\npub fn init_cstream(\n    zcs: &mut CStream<'_>,\n    compression_level: CompressionLevel,\n) -> SafeResult {\n    zcs.init(compression_level)\n}\n\n#[derive(Debug)]\n/// Wrapper around an input buffer.\n///\n/// Bytes will be read starting at `src[pos]`.\n///\n/// `pos` will be updated after reading.\npub struct InBuffer<'a> {\n    pub src: &'a [u8],\n    pub pos: usize,\n}\n\n/// Describe a bytes container, like `Vec<u8>`.\n///\n/// Represents a contiguous segment of allocated memory, a prefix of 
which is initialized.\n///\n/// It allows starting from an uninitialized chunk of memory and writing to it, progressively\n/// initializing it. No re-allocation typically occurs after the initial creation.\n///\n/// The main implementors are:\n/// * `Vec<u8>` and similar structures. These hold both a length (initialized data) and a capacity\n///   (allocated memory).\n///\n///   Use `Vec::with_capacity` to create an empty `Vec` with non-zero capacity, and the length\n///   field will be updated to cover the data written to it (as long as it fits in the given\n///   capacity).\n/// * `[u8]` and `[u8; N]`. These must start already-initialized, and will not be resized. It will\n///   be up to the caller to only use the part that was written (as returned by the various writing\n///   operations).\n/// * `std::io::Cursor<T: WriteBuf>`. This will ignore data before the cursor's position, and\n///   append data after that.\npub unsafe trait WriteBuf {\n    /// Returns the valid data part of this container. Should only cover initialized data.\n    fn as_slice(&self) -> &[u8];\n\n    /// Returns the full capacity of this container. 
May include uninitialized data.\n    fn capacity(&self) -> usize;\n\n    /// Returns a pointer to the start of the data.\n    fn as_mut_ptr(&mut self) -> *mut u8;\n\n    /// Indicates that the first `n` bytes of the container have been written.\n    ///\n    /// Safety: this should only be called if the `n` first bytes of this buffer have actually been\n    /// initialized.\n    unsafe fn filled_until(&mut self, n: usize);\n\n    /// Call the given closure using the pointer and capacity from `self`.\n    ///\n    /// Assumes the given function returns a parseable code, which if valid, represents how many\n    /// bytes were written to `self`.\n    ///\n    /// The given closure must treat its first argument as pointing to potentially uninitialized\n    /// memory, and should not read from it.\n    ///\n    /// In addition, it must have written at least `n` bytes contiguously from this pointer, where\n    /// `n` is the returned value.\n    unsafe fn write_from<F>(&mut self, f: F) -> SafeResult\n    where\n        F: FnOnce(*mut c_void, usize) -> SafeResult,\n    {\n        let res = f(ptr_mut_void(self), self.capacity());\n        if let Ok(n) = res {\n            self.filled_until(n);\n        }\n        res\n    }\n}\n\n#[cfg(feature = \"std\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"std\")))]\nunsafe impl<T> WriteBuf for std::io::Cursor<T>\nwhere\n    T: WriteBuf,\n{\n    fn as_slice(&self) -> &[u8] {\n        &self.get_ref().as_slice()[self.position() as usize..]\n    }\n\n    fn capacity(&self) -> usize {\n        self.get_ref()\n            .capacity()\n            .saturating_sub(self.position() as usize)\n    }\n\n    fn as_mut_ptr(&mut self) -> *mut u8 {\n        let start = self.position() as usize;\n        assert!(start <= self.get_ref().capacity());\n        // Safety: start is still in the same memory allocation\n        unsafe { self.get_mut().as_mut_ptr().add(start) }\n    }\n\n    unsafe fn filled_until(&mut self, n: usize) {\n      
  // Early exit: `n = 0` does not indicate anything.\n        if n == 0 {\n            return;\n        }\n\n        // Here we assume data _before_ self.position() was already initialized.\n        // Egh it's not actually guaranteed by Cursor? So let's guarantee it ourselves.\n        // Since the cursor wraps another `WriteBuf`, we know how much data is initialized there.\n        let position = self.position() as usize;\n        let initialized = self.get_ref().as_slice().len();\n        if let Some(uninitialized) = position.checked_sub(initialized) {\n            // Here, the cursor is further than the known-initialized part.\n            // Cursor's solution is to pad with zeroes, so let's do the same.\n            // We'll zero bytes from the end of valid data (as_slice().len()) to the cursor position.\n\n            // Safety:\n            // * We know `n > 0`\n            // * This means `self.capacity() > 0` (promise by the caller)\n            // * This means `self.get_ref().capacity() > self.position`\n            // * This means that `position` is within the nested pointer's allocation.\n            // * Finally, `initialized + uninitialized = position`, so the entire byte\n            //   range here is within the allocation\n            unsafe {\n                self.get_mut()\n                    .as_mut_ptr()\n                    .add(initialized)\n                    .write_bytes(0u8, uninitialized)\n            };\n        }\n\n        let start = self.position() as usize;\n        assert!(start + n <= self.get_ref().capacity());\n        self.get_mut().filled_until(start + n);\n    }\n}\n\n#[cfg(feature = \"std\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"std\")))]\nunsafe impl<'a> WriteBuf for &'a mut std::vec::Vec<u8> {\n    fn as_slice(&self) -> &[u8] {\n        std::vec::Vec::as_slice(self)\n    }\n\n    fn capacity(&self) -> usize {\n        std::vec::Vec::capacity(self)\n    }\n\n    fn as_mut_ptr(&mut self) -> *mut u8 {\n    
    std::vec::Vec::as_mut_ptr(self)\n    }\n\n    unsafe fn filled_until(&mut self, n: usize) {\n        std::vec::Vec::set_len(self, n)\n    }\n}\n\n#[cfg(feature = \"std\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"std\")))]\nunsafe impl WriteBuf for std::vec::Vec<u8> {\n    fn as_slice(&self) -> &[u8] {\n        &self[..]\n    }\n    fn capacity(&self) -> usize {\n        self.capacity()\n    }\n    fn as_mut_ptr(&mut self) -> *mut u8 {\n        self.as_mut_ptr()\n    }\n    unsafe fn filled_until(&mut self, n: usize) {\n        self.set_len(n);\n    }\n}\n\n#[cfg(feature = \"arrays\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"arrays\")))]\nunsafe impl<const N: usize> WriteBuf for [u8; N] {\n    fn as_slice(&self) -> &[u8] {\n        self\n    }\n    fn capacity(&self) -> usize {\n        self.len()\n    }\n\n    fn as_mut_ptr(&mut self) -> *mut u8 {\n        (&mut self[..]).as_mut_ptr()\n    }\n\n    unsafe fn filled_until(&mut self, _n: usize) {\n        // Assume the slice is already initialized\n    }\n}\n\nunsafe impl WriteBuf for [u8] {\n    fn as_slice(&self) -> &[u8] {\n        self\n    }\n    fn capacity(&self) -> usize {\n        self.len()\n    }\n\n    fn as_mut_ptr(&mut self) -> *mut u8 {\n        self.as_mut_ptr()\n    }\n\n    unsafe fn filled_until(&mut self, _n: usize) {\n        // Assume the slice is already initialized\n    }\n}\n\n/*\n// This is possible, but... 
why?\nunsafe impl<'a> WriteBuf for OutBuffer<'a, [u8]> {\n    fn as_slice(&self) -> &[u8] {\n        self.dst\n    }\n    fn capacity(&self) -> usize {\n        self.dst.len()\n    }\n    fn as_mut_ptr(&mut self) -> *mut u8 {\n        self.dst.as_mut_ptr()\n    }\n    unsafe fn filled_until(&mut self, n: usize) {\n        self.pos = n;\n    }\n}\n*/\n\n#[derive(Debug)]\n/// Wrapper around an output buffer.\n///\n/// `C` is usually either `[u8]` or `Vec<u8>`.\n///\n/// Bytes will be written starting at `dst[pos]`.\n///\n/// `pos` will be updated after writing.\n///\n/// # Invariant\n///\n/// `pos <= dst.capacity()`\npub struct OutBuffer<'a, C: WriteBuf + ?Sized> {\n    dst: &'a mut C,\n    pos: usize,\n}\n\n/// Convenience method to get a mut pointer from a mut ref.\nfn ptr_mut<B>(ptr_void: &mut B) -> *mut B {\n    ptr_void as *mut B\n}\n\n/// Interface between a C-level ZSTD_outBuffer and a rust-level `OutBuffer`.\n///\n/// Will update the parent buffer from the C buffer on drop.\nstruct OutBufferWrapper<'a, 'b, C: WriteBuf + ?Sized> {\n    buf: zstd_sys::ZSTD_outBuffer,\n    parent: &'a mut OutBuffer<'b, C>,\n}\n\nimpl<'a, 'b: 'a, C: WriteBuf + ?Sized> Deref for OutBufferWrapper<'a, 'b, C> {\n    type Target = zstd_sys::ZSTD_outBuffer;\n\n    fn deref(&self) -> &Self::Target {\n        &self.buf\n    }\n}\n\nimpl<'a, 'b: 'a, C: WriteBuf + ?Sized> DerefMut\n    for OutBufferWrapper<'a, 'b, C>\n{\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        &mut self.buf\n    }\n}\n\nimpl<'a, C: WriteBuf + ?Sized> OutBuffer<'a, C> {\n    /// Returns a new `OutBuffer` around the given slice.\n    ///\n    /// Starts with `pos = 0`.\n    pub fn around(dst: &'a mut C) -> Self {\n        OutBuffer { dst, pos: 0 }\n    }\n\n    /// Returns a new `OutBuffer` around the given slice, starting at the given position.\n    ///\n    /// # Panics\n    ///\n    /// If `pos > dst.capacity()`.\n    pub fn around_pos(dst: &'a mut C, pos: usize) -> Self {\n        if pos > 
dst.capacity() {\n            panic!(\"Given position outside of the buffer bounds.\");\n        }\n\n        OutBuffer { dst, pos }\n    }\n\n    /// Returns the current cursor position.\n    ///\n    /// Guaranteed to be <= self.capacity()\n    pub fn pos(&self) -> usize {\n        assert!(self.pos <= self.dst.capacity());\n        self.pos\n    }\n\n    /// Returns the capacity of the underlying buffer.\n    pub fn capacity(&self) -> usize {\n        self.dst.capacity()\n    }\n\n    /// Sets the new cursor position.\n    ///\n    /// # Panics\n    ///\n    /// If `pos > self.dst.capacity()`.\n    ///\n    /// # Safety\n    ///\n    /// Data up to `pos` must have actually been written to.\n    pub unsafe fn set_pos(&mut self, pos: usize) {\n        if pos > self.dst.capacity() {\n            panic!(\"Given position outside of the buffer bounds.\");\n        }\n\n        self.dst.filled_until(pos);\n\n        self.pos = pos;\n    }\n\n    fn wrap<'b>(&'b mut self) -> OutBufferWrapper<'b, 'a, C> {\n        OutBufferWrapper {\n            buf: zstd_sys::ZSTD_outBuffer {\n                dst: ptr_mut_void(self.dst),\n                size: self.dst.capacity(),\n                pos: self.pos,\n            },\n            parent: self,\n        }\n    }\n\n    /// Returns the part of this buffer that was written to.\n    pub fn as_slice<'b>(&'b self) -> &'a [u8]\n    where\n        'b: 'a,\n    {\n        let pos = self.pos;\n        &self.dst.as_slice()[..pos]\n    }\n\n    /// Returns a pointer to the start of this buffer.\n    pub fn as_mut_ptr(&mut self) -> *mut u8 {\n        self.dst.as_mut_ptr()\n    }\n}\n\nimpl<'a, 'b, C: WriteBuf + ?Sized> Drop for OutBufferWrapper<'a, 'b, C> {\n    fn drop(&mut self) {\n        // Safe because we guarantee that data until `self.buf.pos` has been written.\n        unsafe { self.parent.set_pos(self.buf.pos) };\n    }\n}\n\nstruct InBufferWrapper<'a, 'b> {\n    buf: zstd_sys::ZSTD_inBuffer,\n    parent: &'a mut 
InBuffer<'b>,\n}\n\nimpl<'a, 'b: 'a> Deref for InBufferWrapper<'a, 'b> {\n    type Target = zstd_sys::ZSTD_inBuffer;\n\n    fn deref(&self) -> &Self::Target {\n        &self.buf\n    }\n}\n\nimpl<'a, 'b: 'a> DerefMut for InBufferWrapper<'a, 'b> {\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        &mut self.buf\n    }\n}\n\nimpl<'a> InBuffer<'a> {\n    /// Returns a new `InBuffer` around the given slice.\n    ///\n    /// Starts with `pos = 0`.\n    pub fn around(src: &'a [u8]) -> Self {\n        InBuffer { src, pos: 0 }\n    }\n\n    /// Returns the current cursor position.\n    pub fn pos(&self) -> usize {\n        self.pos\n    }\n\n    /// Sets the new cursor position.\n    ///\n    /// # Panics\n    ///\n    /// If `pos > self.src.len()`.\n    pub fn set_pos(&mut self, pos: usize) {\n        if pos > self.src.len() {\n            panic!(\"Given position outside of the buffer bounds.\");\n        }\n        self.pos = pos;\n    }\n\n    fn wrap<'b>(&'b mut self) -> InBufferWrapper<'b, 'a> {\n        InBufferWrapper {\n            buf: zstd_sys::ZSTD_inBuffer {\n                src: ptr_void(self.src),\n                size: self.src.len(),\n                pos: self.pos,\n            },\n            parent: self,\n        }\n    }\n}\n\nimpl<'a, 'b> Drop for InBufferWrapper<'a, 'b> {\n    fn drop(&mut self) {\n        self.parent.set_pos(self.buf.pos);\n    }\n}\n\n/// A Decompression stream.\n///\n/// Same as `DCtx`.\npub type DStream<'a> = DCtx<'a>;\n\n// Some functions work on a \"frame prefix\".\n// TODO: Define `struct FramePrefix(&[u8]);` and move these functions to it?\n//\n// Some other functions work on a dictionary (not CDict or DDict).\n// Same thing?\n\n/// Wraps the `ZSTD_findFrameCompressedSize()` function.\n///\n/// `src` should contain at least an entire frame.\npub fn find_frame_compressed_size(src: &[u8]) -> SafeResult {\n    let code = unsafe {\n        zstd_sys::ZSTD_findFrameCompressedSize(ptr_void(src), src.len())\n    };\n    
parse_code(code)\n}\n\n/// Wraps the `ZSTD_getFrameContentSize()` function.\n///\n/// Args:\n/// * `src`: A prefix of the compressed frame. It should at least include the frame header.\n///\n/// Returns:\n/// * `Err(ContentSizeError)` if `src` is too small of a prefix, or if it appears corrupted.\n/// * `Ok(None)` if the frame does not include a content size.\n/// * `Ok(Some(content_size_in_bytes))` otherwise.\npub fn get_frame_content_size(\n    src: &[u8],\n) -> Result<Option<u64>, ContentSizeError> {\n    parse_content_size(unsafe {\n        zstd_sys::ZSTD_getFrameContentSize(ptr_void(src), src.len())\n    })\n}\n\n/// Wraps the `ZSTD_findDecompressedSize()` function.\n///\n/// `src` should be exactly a sequence of ZSTD frames.\n#[cfg(feature = \"experimental\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\npub fn find_decompressed_size(\n    src: &[u8],\n) -> Result<Option<u64>, ContentSizeError> {\n    parse_content_size(unsafe {\n        zstd_sys::ZSTD_findDecompressedSize(ptr_void(src), src.len())\n    })\n}\n\n/// Wraps the `ZSTD_isFrame()` function.\n#[cfg(feature = \"experimental\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\npub fn is_frame(buffer: &[u8]) -> bool {\n    unsafe { zstd_sys::ZSTD_isFrame(ptr_void(buffer), buffer.len()) > 0 }\n}\n\n/// Wraps the `ZSTD_getDictID_fromDict()` function.\n///\n/// Returns `None` if the dictionary is not a valid zstd dictionary.\npub fn get_dict_id_from_dict(dict: &[u8]) -> Option<NonZeroU32> {\n    NonZeroU32::new(unsafe {\n        zstd_sys::ZSTD_getDictID_fromDict(ptr_void(dict), dict.len()) as u32\n    })\n}\n\n/// Wraps the `ZSTD_getDictID_fromFrame()` function.\n///\n/// Returns `None` if the dictionary ID could not be decoded. 
This may happen if:\n/// * The frame was not encoded with a dictionary.\n/// * The frame intentionally did not include dictionary ID.\n/// * The dictionary was non-conformant.\n/// * `src` is too small and does not include the frame header.\n/// * `src` is not a valid zstd frame prefix.\npub fn get_dict_id_from_frame(src: &[u8]) -> Option<NonZeroU32> {\n    NonZeroU32::new(unsafe {\n        zstd_sys::ZSTD_getDictID_fromFrame(ptr_void(src), src.len()) as u32\n    })\n}\n\n/// What kind of context reset should be applied.\npub enum ResetDirective {\n    /// Only the session will be reset.\n    ///\n    /// All parameters will be preserved (including the dictionary).\n    /// But any frame being processed will be dropped.\n    ///\n    /// It can be useful to start re-using a context after an error or when an\n    /// ongoing compression is no longer needed.\n    SessionOnly,\n\n    /// Only reset parameters (including dictionary or referenced prefix).\n    ///\n    /// All parameters will be reset to default values.\n    ///\n    /// This can only be done between sessions - no compression or decompression must be ongoing.\n    Parameters,\n\n    /// Reset both the session and parameters.\n    ///\n    /// The result is similar to a newly created context.\n    SessionAndParameters,\n}\n\nimpl ResetDirective {\n    fn as_sys(self) -> zstd_sys::ZSTD_ResetDirective {\n        match self {\n            ResetDirective::SessionOnly => zstd_sys::ZSTD_ResetDirective::ZSTD_reset_session_only,\n            ResetDirective::Parameters => zstd_sys::ZSTD_ResetDirective::ZSTD_reset_parameters,\n            ResetDirective::SessionAndParameters => zstd_sys::ZSTD_ResetDirective::ZSTD_reset_session_and_parameters,\n        }\n    }\n}\n\n#[cfg(feature = \"experimental\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n#[derive(Copy, Clone, Debug, PartialEq, Eq)]\n#[repr(u32)]\npub enum FrameFormat {\n    /// Regular zstd format.\n    One = 
zstd_sys::ZSTD_format_e::ZSTD_f_zstd1 as u32,\n\n    /// Skip the 4 bytes identifying the content as zstd-compressed data.\n    Magicless = zstd_sys::ZSTD_format_e::ZSTD_f_zstd1_magicless as u32,\n}\n\n#[cfg(feature = \"experimental\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n#[derive(Copy, Clone, Debug, PartialEq, Eq)]\n#[repr(u32)]\npub enum DictAttachPref {\n    DefaultAttach =\n        zstd_sys::ZSTD_dictAttachPref_e::ZSTD_dictDefaultAttach as u32,\n    ForceAttach = zstd_sys::ZSTD_dictAttachPref_e::ZSTD_dictForceAttach as u32,\n    ForceCopy = zstd_sys::ZSTD_dictAttachPref_e::ZSTD_dictForceCopy as u32,\n    ForceLoad = zstd_sys::ZSTD_dictAttachPref_e::ZSTD_dictForceLoad as u32,\n}\n\n#[cfg(feature = \"experimental\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n#[derive(Copy, Clone, Debug, PartialEq, Eq)]\n#[repr(u32)]\npub enum ParamSwitch {\n    Auto = zstd_sys::ZSTD_ParamSwitch_e::ZSTD_ps_auto as u32,\n    Enable = zstd_sys::ZSTD_ParamSwitch_e::ZSTD_ps_enable as u32,\n    Disable = zstd_sys::ZSTD_ParamSwitch_e::ZSTD_ps_disable as u32,\n}\n\n/// A compression parameter.\n#[derive(Copy, Clone, Debug, PartialEq, Eq)]\n#[non_exhaustive]\npub enum CParameter {\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    RSyncable(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    Format(FrameFormat),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    ForceMaxWindow(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    ForceAttachDict(DictAttachPref),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    
LiteralCompressionMode(ParamSwitch),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    SrcSizeHint(u32),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    EnableDedicatedDictSearch(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    StableInBuffer(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    StableOutBuffer(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    BlockDelimiters(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    ValidateSequences(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    UseBlockSplitter(ParamSwitch),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    UseRowMatchFinder(ParamSwitch),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    DeterministicRefPrefix(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    PrefetchCDictTables(ParamSwitch),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    EnableSeqProducerFallback(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    MaxBlockSize(u32),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    
SearchForExternalRepcodes(ParamSwitch),\n\n    /// Target CBlock size.\n    ///\n    /// Tries to make compressed blocks fit in this size (not a guarantee, just a target).\n    /// Useful to reduce end-to-end latency in low-bandwidth environments.\n    ///\n    /// No target when the value is 0.\n    TargetCBlockSize(u32),\n\n    /// Compression level to use.\n    ///\n    /// Compression levels are global presets for the other compression parameters.\n    CompressionLevel(CompressionLevel),\n\n    /// Maximum allowed back-reference distance.\n    ///\n    /// The actual distance is 2 power \"this value\".\n    WindowLog(u32),\n\n    HashLog(u32),\n\n    ChainLog(u32),\n\n    SearchLog(u32),\n\n    MinMatch(u32),\n\n    TargetLength(u32),\n\n    Strategy(Strategy),\n\n    EnableLongDistanceMatching(bool),\n\n    LdmHashLog(u32),\n\n    LdmMinMatch(u32),\n\n    LdmBucketSizeLog(u32),\n\n    LdmHashRateLog(u32),\n\n    ContentSizeFlag(bool),\n\n    ChecksumFlag(bool),\n\n    DictIdFlag(bool),\n\n    /// How many threads will be spawned.\n    ///\n    /// With a default value of `0`, `compress_stream*` functions block until they complete.\n    ///\n    /// With any other value (including 1, a single compressing thread), these methods directly\n    /// return, and the actual compression is done in the background (until a flush is requested).\n    ///\n    /// Note: this will only work if the `zstdmt` feature is activated.\n    NbWorkers(u32),\n\n    /// Size in bytes of a compression job.\n    ///\n    /// Does not have any effect when `NbWorkers` is set to 0.\n    ///\n    /// The default value of 0 finds the best job size based on the compression parameters.\n    ///\n    /// Note: this will only work if the `zstdmt` feature is activated.\n    JobSize(u32),\n\n    /// Specifies how much overlap must be given to each worker.\n    ///\n    /// Possible values:\n    ///\n    /// * `0` (default value): automatic overlap based on compression strategy.\n    /// * `1`: No 
overlap\n    /// * `1 < n < 9`: Overlap a fraction of the window size, defined as `1/(2 ^ 9-n)`.\n    /// * `9`: Full overlap (as long as the window)\n    /// * `9 < m`: Will return an error.\n    ///\n    /// Note: this will only work if the `zstdmt` feature is activated.\n    OverlapSizeLog(u32),\n}\n\n/// A decompression parameter.\n#[derive(Copy, Clone, Debug, PartialEq, Eq)]\n#[non_exhaustive]\npub enum DParameter {\n    WindowLogMax(u32),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    /// See `FrameFormat`.\n    Format(FrameFormat),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    StableOutBuffer(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    ForceIgnoreChecksum(bool),\n\n    #[cfg(feature = \"experimental\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\n    RefMultipleDDicts(bool),\n}\n\n/// Wraps the `ZDICT_trainFromBuffer()` function.\n#[cfg(feature = \"zdict_builder\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"zdict_builder\")))]\npub fn train_from_buffer<C: WriteBuf + ?Sized>(\n    dict_buffer: &mut C,\n    samples_buffer: &[u8],\n    samples_sizes: &[usize],\n) -> SafeResult {\n    assert_eq!(samples_buffer.len(), samples_sizes.iter().sum());\n\n    unsafe {\n        dict_buffer.write_from(|buffer, capacity| {\n            parse_code(zstd_sys::ZDICT_trainFromBuffer(\n                buffer,\n                capacity,\n                ptr_void(samples_buffer),\n                samples_sizes.as_ptr(),\n                samples_sizes.len() as u32,\n            ))\n        })\n    }\n}\n\n/// Wraps the `ZDICT_getDictID()` function.\n#[cfg(feature = \"zdict_builder\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"zdict_builder\")))]\npub fn get_dict_id(dict_buffer: 
&[u8]) -> Option<NonZeroU32> {\n    NonZeroU32::new(unsafe {\n        zstd_sys::ZDICT_getDictID(ptr_void(dict_buffer), dict_buffer.len())\n    })\n}\n\n/// Wraps the `ZSTD_getBlockSize()` function.\n#[cfg(feature = \"experimental\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\npub fn get_block_size(cctx: &CCtx) -> usize {\n    unsafe { zstd_sys::ZSTD_getBlockSize(cctx.0.as_ptr()) }\n}\n\n/// Wraps the `ZSTD_decompressBound` function.\n#[cfg(feature = \"experimental\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\npub fn decompress_bound(data: &[u8]) -> Result<u64, ErrorCode> {\n    let bound =\n        unsafe { zstd_sys::ZSTD_decompressBound(ptr_void(data), data.len()) };\n    if is_error(bound as usize) {\n        Err(bound as usize)\n    } else {\n        Ok(bound)\n    }\n}\n\n/// Given a buffer of size `src_size`, returns the maximum number of sequences that can be\n/// generated.\n#[cfg(feature = \"experimental\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\npub fn sequence_bound(src_size: usize) -> usize {\n    // Safety: Just FFI.\n    unsafe { zstd_sys::ZSTD_sequenceBound(src_size) }\n}\n\n/// Returns the minimum extra space when output and input buffer overlap.\n///\n/// When using in-place decompression, the output buffer must be at least this much bigger (in\n/// bytes) than the input buffer. The extra space must be at the front of the output buffer (the\n/// input buffer must be at the end of the output buffer).\n#[cfg(feature = \"experimental\")]\n#[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"experimental\")))]\npub fn decompression_margin(\n    compressed_data: &[u8],\n) -> Result<usize, ErrorCode> {\n    parse_code(unsafe {\n        zstd_sys::ZSTD_decompressionMargin(\n            ptr_void(compressed_data),\n            compressed_data.len(),\n        )\n    })\n}\n"
  },
  {
    "path": "zstd-safe/src/seekable.rs",
    "content": "//! The seekable format splits the compressed data into a series of \"frames\",\n//! each compressed individually so that decompression of a section in the\n//! middle of an archive only requires zstd to decompress at most a frame's\n//! worth of extra data, instead of the entire archive.\n\nuse core::{marker::PhantomData, ptr::NonNull};\n\nuse crate::{\n    parse_code, ptr_mut, ptr_void, CompressionLevel, InBuffer, OutBuffer,\n    SafeResult, WriteBuf, SEEKABLE_FRAMEINDEX_TOOLARGE,\n};\n\n/// Indicates that the passed frame index is too large.\n///\n/// This happens when `frame_index > num_frames()`.\n#[derive(Debug, PartialEq)]\npub struct FrameIndexTooLargeError;\n\nimpl core::fmt::Display for FrameIndexTooLargeError {\n    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {\n        f.write_str(\"Frame index too large\")\n    }\n}\n\n/// Required to tracking streaming operation.\n///\n/// Streaming objects are reusable to avoid allocation and deallocation,\n/// to start a new compression operation call `init()`.\npub struct SeekableCStream(NonNull<zstd_sys::ZSTD_seekable_CStream>);\n\nunsafe impl Send for SeekableCStream {}\nunsafe impl Sync for SeekableCStream {}\n\nimpl Default for SeekableCStream {\n    fn default() -> Self {\n        SeekableCStream::create()\n    }\n}\n\nimpl SeekableCStream {\n    /// Tries to create a new `SeekableCStream`.\n    ///\n    /// Returns `None` if zstd returns a NULL pointer - may happen if allocation fails.\n    pub fn try_create() -> Option<Self> {\n        // Safety: Just FFI\n        Some(SeekableCStream(NonNull::new(unsafe {\n            zstd_sys::ZSTD_seekable_createCStream()\n        })?))\n    }\n\n    /// Creates a new `SeekableCStream`.\n    ///\n    /// # Panics\n    ///\n    /// If zstd returns a NULL pointer.\n    pub fn create() -> Self {\n        Self::try_create()\n            .expect(\"zstd returned null pointer when creating new seekable compression stream\")\n    }\n\n    
/// Wraps the `ZSTD_seekable_initCStream()` function.\n    ///\n    /// Call this to initialize a `SeekableCStream` object for a new compression operation.\n    /// - `max_frame_size` indicates the size at which to automatically start a new seekable\n    /// frame. `max_frame_size == 0` implies the default maximum size. Smaller frame sizes allow\n    /// faster decompression of small segments, since retrieving a single byte requires\n    /// decompression of the full frame where the byte belongs. In general, size the frames\n    /// to roughly correspond to the access granularity (when it's known). But small sizes\n    /// also reduce compression ratio. Avoid really tiny frame sizes (< 1 KB), that would\n    /// hurt compression ratio considerably.\n    /// - `checksum_flag` indicates whether or not the seek table should include frame\n    /// checksums on the uncompressed data for verification.\n    ///\n    /// Returns a size hint for input to provide for compression, or an error code.\n    pub fn init(\n        &mut self,\n        compression_level: CompressionLevel,\n        checksum_flag: bool,\n        max_frame_size: u32,\n    ) -> SafeResult {\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_seekable_initCStream(\n                self.0.as_ptr(),\n                compression_level,\n                checksum_flag as i32,\n                max_frame_size,\n            )\n        };\n        parse_code(code)\n    }\n\n    /// Wraps the `ZSTD_seekable_compressStream()` function.\n    ///\n    /// Call this repetitively to consume input stream. The function will automatically\n    /// update both `pos` fields. Note that it may not consume the entire input, in which\n    /// case `pos < size`, and it's up to the caller to present again remaining data.\n    ///\n    /// Returns a size hint, preferred number of bytes to use as input for the next call\n    /// or an error code. 
Note that it's just a hint, to help latency a little, any other\n    /// value will work fine.\n    pub fn compress_stream<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n        input: &mut InBuffer<'_>,\n    ) -> SafeResult {\n        let mut output = output.wrap();\n        let mut input = input.wrap();\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_seekable_compressStream(\n                self.0.as_ptr(),\n                ptr_mut(&mut output),\n                ptr_mut(&mut input),\n            )\n        };\n        parse_code(code)\n    }\n\n    /// Wraps the `ZSTD_seekable_endFrame()` function.\n    ///\n    /// Call this any time to end the current frame and start a new one.\n    pub fn end_frame<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n    ) -> SafeResult {\n        let mut output = output.wrap();\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_seekable_endFrame(\n                self.0.as_ptr(),\n                ptr_mut(&mut output),\n            )\n        };\n        parse_code(code)\n    }\n\n    /// Wraps the `ZSTD_seekable_endStream()` function.\n    ///\n    /// This will end the current frame, and then write the seek table so that\n    /// decompressors can efficiently find compressed frames.\n    ///\n    /// Returns a number > 0 if it was unable to flush all the necessary data to `output`.\n    /// In this case, it should be called again until all remaining data is flushed out and\n    /// 0 is returned.\n    pub fn end_stream<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n    ) -> SafeResult {\n        let mut output = output.wrap();\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_seekable_endStream(\n                self.0.as_ptr(),\n                ptr_mut(&mut output),\n            )\n        };\n        
parse_code(code)\n    }\n}\n\nimpl Drop for SeekableCStream {\n    fn drop(&mut self) {\n        // Safety: Just FFI\n        unsafe {\n            zstd_sys::ZSTD_seekable_freeCStream(self.0.as_ptr());\n        }\n    }\n}\n\n/// Allows for the seek table to be constructed directly.\n///\n/// This table can then be appended to a file of concatenated frames. This allows the\n/// frames to be compressed independently, even in parallel, and compiled together\n/// afterward into a seekable archive.\npub struct FrameLog(NonNull<zstd_sys::ZSTD_frameLog>);\n\nunsafe impl Send for FrameLog {}\nunsafe impl Sync for FrameLog {}\n\nimpl FrameLog {\n    /// Tries to create a new `FrameLog`.\n    ///\n    /// Returns `None` if zstd returns a NULL pointer - may happen if allocation fails.\n    pub fn try_create(checksum_flag: bool) -> Option<Self> {\n        Some(FrameLog(\n            // Safety: Just FFI\n            NonNull::new(unsafe {\n                zstd_sys::ZSTD_seekable_createFrameLog(checksum_flag as i32)\n            })?,\n        ))\n    }\n\n    /// Creates a new `FrameLog`.\n    ///\n    /// # Panics\n    ///\n    /// If zstd returns a NULL pointer.\n    pub fn create(checksum_flag: bool) -> Self {\n        Self::try_create(checksum_flag)\n            .expect(\"Zstd returned null pointer when creating new frame log\")\n    }\n\n    /// Needs to be called once for each frame in the archive.\n    ///\n    /// If the `FrameLog` was created with `checksum_flag == false`, the `checksum` may be none\n    /// and any value assigned to it will be ignored. 
If the `FrameLog` was created with\n    /// `checksum_flag == true`, it should be the least significant 32 bits of the XXH64\n    /// hash of the uncompressed data.\n    pub fn log_frame(\n        &mut self,\n        compressed_size: u32,\n        decompressed_size: u32,\n        checksum: Option<u32>,\n    ) -> SafeResult {\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_seekable_logFrame(\n                self.0.as_ptr(),\n                compressed_size,\n                decompressed_size,\n                checksum.unwrap_or_default(),\n            )\n        };\n        parse_code(code)\n    }\n\n    /// Writes the seek table to `output`.\n    ///\n    /// Returns 0 if the entire table was written. Otherwise, it will be equal to the number\n    /// of bytes left to write.\n    pub fn write_seek_table<C: WriteBuf + ?Sized>(\n        &mut self,\n        output: &mut OutBuffer<'_, C>,\n    ) -> SafeResult {\n        let mut output = output.wrap();\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_seekable_writeSeekTable(\n                self.0.as_ptr(),\n                ptr_mut(&mut output),\n            )\n        };\n        parse_code(code)\n    }\n}\n\nimpl Drop for FrameLog {\n    fn drop(&mut self) {\n        // Safety: Just FFI\n        unsafe {\n            zstd_sys::ZSTD_seekable_freeFrameLog(self.0.as_ptr());\n        }\n    }\n}\n\n/// A seekable decompression object.\n///\n/// The lifetime references the potential buffer that holds the data of this seekable.\npub struct Seekable<'a>(NonNull<zstd_sys::ZSTD_seekable>, PhantomData<&'a ()>);\n\nunsafe impl Send for Seekable<'_> {}\nunsafe impl Sync for Seekable<'_> {}\n\nimpl Default for Seekable<'_> {\n    fn default() -> Self {\n        Seekable::create()\n    }\n}\n\nimpl<'a> Seekable<'a> {\n    /// Tries to create a new `Seekable`.\n    ///\n    /// Returns `None` if zstd returns a NULL pointer - may happen if allocation 
fails.\n    pub fn try_create() -> Option<Self> {\n        // Safety: Just FFI\n        Some(Seekable(\n            NonNull::new(unsafe { zstd_sys::ZSTD_seekable_create() })?,\n            PhantomData,\n        ))\n    }\n\n    /// Creates a new `Seekable`.\n    ///\n    /// # Panics\n    ///\n    /// If zstd returns a NULL pointer.\n    pub fn create() -> Self {\n        Self::try_create()\n            .expect(\"Zstd returned null pointer when creating new seekable\")\n    }\n\n    /// Initializes this `Seekable` with the seek table provided in `src`.\n    ///\n    /// The data contained in `src` should be the entire seekable file, including the seek table.\n    /// Consider using `init_advanced()`, if it is not feasible to have the entire seekable file in\n    /// memory.\n    pub fn init_buff(&mut self, src: &'a [u8]) -> SafeResult {\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_seekable_initBuff(\n                self.0.as_ptr(),\n                ptr_void(src),\n                src.len(),\n            )\n        };\n\n        parse_code(code)\n    }\n\n    /// Decompresses the length of `dst` at decompressed offset `offset`.\n    ///\n    /// May have to decompress the entire prefix of the frame before the desired data if it has\n    /// not already processed this section. 
If this is called multiple times for a consecutive\n    /// range of data, it will efficiently retain the decompressor object and avoid\n    /// redecompressing frame prefixes.\n    ///\n    /// Returns the number of bytes decompressed, or an error code.\n    pub fn decompress<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        offset: u64,\n    ) -> SafeResult {\n        unsafe {\n            dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_seekable_decompress(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    offset,\n                ))\n            })\n        }\n    }\n\n    /// Decompresses the frame with index `frame_index` into `dst`.\n    ///\n    /// Returns an error if `frame_index` is larger than the value returned by `num_frames()`.\n    pub fn decompress_frame<C: WriteBuf + ?Sized>(\n        &mut self,\n        dst: &mut C,\n        frame_index: u32,\n    ) -> SafeResult {\n        unsafe {\n            dst.write_from(|buffer, capacity| {\n                parse_code(zstd_sys::ZSTD_seekable_decompressFrame(\n                    self.0.as_ptr(),\n                    buffer,\n                    capacity,\n                    frame_index,\n                ))\n            })\n        }\n    }\n\n    /// Get the number of frames of this seekable object.\n    ///\n    /// Returns `0` if the seekable is not initialized.\n    pub fn num_frames(&self) -> u32 {\n        unsafe { zstd_sys::ZSTD_seekable_getNumFrames(self.0.as_ptr()) }\n    }\n\n    /// Get the offset of the compressed frame.\n    ///\n    /// Returns an error if `frame_index` is out of range.\n    pub fn frame_compressed_offset(\n        &self,\n        frame_index: u32,\n    ) -> Result<u64, FrameIndexTooLargeError> {\n        let offset = unsafe {\n            zstd_sys::ZSTD_seekable_getFrameCompressedOffset(\n                self.0.as_ptr(),\n                frame_index,\n   
         )\n        };\n\n        if offset == SEEKABLE_FRAMEINDEX_TOOLARGE {\n            return Err(FrameIndexTooLargeError);\n        }\n\n        Ok(offset)\n    }\n\n    /// Get the offset of the decompressed frame.\n    ///\n    /// Returns an error if `frame_index` is out of range.\n    pub fn frame_decompressed_offset(\n        &self,\n        frame_index: u32,\n    ) -> Result<u64, FrameIndexTooLargeError> {\n        let offset = unsafe {\n            zstd_sys::ZSTD_seekable_getFrameDecompressedOffset(\n                self.0.as_ptr(),\n                frame_index,\n            )\n        };\n\n        if offset == SEEKABLE_FRAMEINDEX_TOOLARGE {\n            return Err(FrameIndexTooLargeError);\n        }\n\n        Ok(offset)\n    }\n\n    /// Get the size of the compressed frame.\n    ///\n    /// Returns an error if `frame_index` is out of range.\n    pub fn frame_compressed_size(&self, frame_index: u32) -> SafeResult {\n        let code = unsafe {\n            zstd_sys::ZSTD_seekable_getFrameCompressedSize(\n                self.0.as_ptr(),\n                frame_index,\n            )\n        };\n\n        parse_code(code)\n    }\n\n    /// Get the size of the decompressed frame.\n    ///\n    /// Returns an error if `frame_index` is out of range.\n    pub fn frame_decompressed_size(&self, frame_index: u32) -> SafeResult {\n        let code = unsafe {\n            zstd_sys::ZSTD_seekable_getFrameDecompressedSize(\n                self.0.as_ptr(),\n                frame_index,\n            )\n        };\n\n        parse_code(code)\n    }\n\n    /// Get the frame at the given offset.\n    pub fn offset_to_frame_index(&self, offset: u64) -> u32 {\n        unsafe {\n            zstd_sys::ZSTD_seekable_offsetToFrameIndex(self.0.as_ptr(), offset)\n        }\n    }\n}\n\nimpl<'a> Drop for Seekable<'a> {\n    fn drop(&mut self) {\n        // Safety: Just FFI\n        unsafe {\n            zstd_sys::ZSTD_seekable_free(self.0.as_ptr());\n        }\n    
}\n}\n\n#[cfg(feature = \"std\")]\npub struct AdvancedSeekable<'a, F> {\n    inner: Seekable<'a>,\n    // We can't use Box<F> since it'd break rust aliasing rules when calling\n    // advanced_read/advanced_seek through the C code.\n    src: *mut F,\n}\n\n#[cfg(feature = \"std\")]\nunsafe impl<F> Send for AdvancedSeekable<'_, F> where F: Send {}\n#[cfg(feature = \"std\")]\nunsafe impl<F> Sync for AdvancedSeekable<'_, F> where F: Sync {}\n\n#[cfg(feature = \"std\")]\nimpl<'a, F> core::ops::Deref for AdvancedSeekable<'a, F> {\n    type Target = Seekable<'a>;\n\n    fn deref(&self) -> &Self::Target {\n        &self.inner\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl<'a, F> core::ops::DerefMut for AdvancedSeekable<'a, F> {\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        &mut self.inner\n    }\n}\n\n#[cfg(feature = \"std\")]\nimpl<'a, F> Drop for AdvancedSeekable<'a, F> {\n    fn drop(&mut self) {\n        use std::boxed::Box;\n        // this drops the box\n        let _: Box<F> = unsafe { Box::from_raw(self.src) };\n    }\n}\n\nimpl<'a> Seekable<'a> {\n    /// A general API allowing the client to provide its own read and seek implementations.\n    ///\n    /// Initializes this seekable without having the complete compressed data in memory,\n    /// but seeks and reads `src` as required. 
Use this function if you are looking for\n    /// an alternative to the `ZSTD_seekable_initFile()` function.\n    #[cfg(feature = \"std\")]\n    #[cfg_attr(feature = \"doc-cfg\", doc(cfg(feature = \"std\")))]\n    pub fn init_advanced<F>(\n        self,\n        src: std::boxed::Box<F>,\n    ) -> Result<AdvancedSeekable<'a, F>, crate::ErrorCode>\n    where\n        F: std::io::Read + std::io::Seek,\n    {\n        let opaque = std::boxed::Box::into_raw(src) as *mut F;\n        let custom_file = zstd_sys::ZSTD_seekable_customFile {\n            opaque: opaque as *mut core::ffi::c_void,\n            read: Some(advanced_read::<F>),\n            seek: Some(advanced_seek::<F>),\n        };\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_seekable_initAdvanced(self.0.as_ptr(), custom_file)\n        };\n\n        if crate::is_error(code) {\n            return Err(code);\n        }\n\n        Ok(AdvancedSeekable {\n            inner: self,\n            src: opaque,\n        })\n    }\n}\n\n/// Seeks the read head to `offset` from `origin`, where origin is either `SEEK_SET`\n/// (beginning of file), `SEEK_CUR` (current position) or `SEEK_END` (end of file),\n/// as defined in `stdio.h`.\n///\n/// Returns a non-negative value in case of success, and a negative value in case of failure.\n#[cfg(feature = \"std\")]\nunsafe extern \"C\" fn advanced_seek<S: std::io::Seek>(\n    opaque: *mut core::ffi::c_void,\n    offset: ::core::ffi::c_longlong,\n    origin: ::core::ffi::c_int,\n) -> ::core::ffi::c_int {\n    use core::convert::TryFrom;\n    use std::io::SeekFrom;\n\n    // as defined in stdio.h\n    const SEEK_SET: i32 = 0;\n    const SEEK_CUR: i32 = 1;\n    const SEEK_END: i32 = 2;\n\n    // Safety: The trait boundaries in `init_advanced()` ensure that `opaque` points to an S\n    let seeker: &mut S = std::mem::transmute(opaque);\n    let pos = match origin {\n        SEEK_SET => {\n            let Ok(offset) = u64::try_from(offset) else 
{\n                return -1;\n            };\n            SeekFrom::Start(offset)\n        }\n        SEEK_CUR => SeekFrom::Current(offset),\n        SEEK_END => SeekFrom::End(offset),\n        // not possible\n        _ => return -1,\n    };\n\n    if seeker.seek(pos).is_err() {\n        return -1;\n    }\n\n    0\n}\n\n/// Reads exactly `n` bytes into `buffer`.\n///\n/// Returns a non-negative value in case of success, and a negative value in case of failure.\n#[cfg(feature = \"std\")]\nunsafe extern \"C\" fn advanced_read<R: std::io::Read>(\n    opaque: *mut core::ffi::c_void,\n    buffer: *mut core::ffi::c_void,\n    n: usize,\n) -> ::core::ffi::c_int {\n    // Safety: The trait boundaries in `init_advanced()` ensure that `opaque` points to a R\n    let reader: &mut R = std::mem::transmute(opaque);\n    // Safety: zstd ensures the buffer is allocated and safe to use\n    let mut buf = std::slice::from_raw_parts_mut(buffer as *mut u8, n);\n    if reader.read_exact(&mut buf).is_err() {\n        return -1;\n    }\n\n    0\n}\n\n/// Indicates that the seek table could not be created.\n#[derive(Debug, PartialEq)]\npub struct SeekTableCreateError;\n\nimpl core::fmt::Display for SeekTableCreateError {\n    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {\n        f.write_str(\"Zstd returned null pointer when creating new seektable from seekable\")\n    }\n}\n\npub struct SeekTable(NonNull<zstd_sys::ZSTD_seekTable>);\n\nunsafe impl Send for SeekTable {}\nunsafe impl Sync for SeekTable {}\n\nimpl SeekTable {\n    /// Try to create a `SeekTable` from a `Seekable`.\n    ///\n    /// Memory constrained use cases that manage multiple archives benefit from retaining\n    /// multiple archive seek tables without retaining a `Seekable` instance for each.\n    pub fn try_from_seekable<'a>(\n        value: &Seekable<'a>,\n    ) -> Result<Self, SeekTableCreateError> {\n        // Safety: Just FFI\n        let ptr = unsafe {\n            
zstd_sys::ZSTD_seekTable_create_fromSeekable(value.0.as_ptr())\n        };\n        let ptr = NonNull::new(ptr).ok_or(SeekTableCreateError)?;\n\n        Ok(Self(ptr))\n    }\n\n    /// Get the number of frames of the underlying seekable object.\n    pub fn num_frames(&self) -> u32 {\n        // Safety: Just FFI\n        unsafe { zstd_sys::ZSTD_seekTable_getNumFrames(self.0.as_ptr()) }\n    }\n\n    /// Get the offset of the compressed frame.\n    ///\n    /// Returns an error if `frame_index` is out of range.\n    pub fn frame_compressed_offset(\n        &self,\n        frame_index: u32,\n    ) -> Result<u64, FrameIndexTooLargeError> {\n        // Safety: Just FFI\n        let offset = unsafe {\n            zstd_sys::ZSTD_seekTable_getFrameCompressedOffset(\n                self.0.as_ptr(),\n                frame_index,\n            )\n        };\n\n        if offset == SEEKABLE_FRAMEINDEX_TOOLARGE {\n            return Err(FrameIndexTooLargeError);\n        }\n\n        Ok(offset)\n    }\n\n    /// Get the offset of the decompressed frame.\n    ///\n    /// Returns an error if `frame_index` is out of range.\n    pub fn frame_decompressed_offset(\n        &self,\n        frame_index: u32,\n    ) -> Result<u64, FrameIndexTooLargeError> {\n        // Safety: Just FFI\n        let offset = unsafe {\n            zstd_sys::ZSTD_seekTable_getFrameDecompressedOffset(\n                self.0.as_ptr(),\n                frame_index,\n            )\n        };\n\n        if offset == SEEKABLE_FRAMEINDEX_TOOLARGE {\n            return Err(FrameIndexTooLargeError);\n        }\n\n        Ok(offset)\n    }\n\n    /// Get the size of the compressed frame.\n    ///\n    /// Returns an error if `frame_index` is out of range.\n    pub fn frame_compressed_size(&self, frame_index: u32) -> SafeResult {\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_seekTable_getFrameCompressedSize(\n                self.0.as_ptr(),\n                frame_index,\n   
         )\n        };\n\n        parse_code(code)\n    }\n\n    /// Get the size of the decompressed frame.\n    ///\n    /// Returns an error if `frame_index` is out of range.\n    pub fn frame_decompressed_size(&self, frame_index: u32) -> SafeResult {\n        // Safety: Just FFI\n        let code = unsafe {\n            zstd_sys::ZSTD_seekTable_getFrameDecompressedSize(\n                self.0.as_ptr(),\n                frame_index,\n            )\n        };\n\n        parse_code(code)\n    }\n\n    /// Get the frame at the given offset.\n    pub fn offset_to_frame_index(&self, offset: u64) -> u32 {\n        // Safety: Just FFI\n        unsafe {\n            zstd_sys::ZSTD_seekTable_offsetToFrameIndex(\n                self.0.as_ptr(),\n                offset,\n            )\n        }\n    }\n}\n\nimpl Drop for SeekTable {\n    fn drop(&mut self) {\n        // Safety: Just FFI\n        unsafe {\n            zstd_sys::ZSTD_seekTable_free(self.0.as_ptr());\n        }\n    }\n}\n"
  },
  {
    "path": "zstd-safe/src/tests.rs",
    "content": "extern crate std;\nuse crate as zstd_safe;\n\nuse self::std::vec::Vec;\n\nconst INPUT: &[u8] = b\"Rust is a multi-paradigm system programming language focused on safety, especially safe concurrency. Rust is syntactically similar to C++, but is designed to provide better memory safety while maintaining high performance.\";\nconst LONG_CONTENT: &str = include_str!(\"lib.rs\");\n\n#[cfg(feature = \"std\")]\n#[test]\nfn test_writebuf() {\n    use zstd_safe::WriteBuf;\n\n    let mut data = Vec::with_capacity(10);\n    unsafe {\n        data.write_from(|ptr, n| {\n            assert!(n >= 4);\n            let ptr = ptr as *mut u8;\n            ptr.write(0);\n            ptr.add(1).write(1);\n            ptr.add(2).write(2);\n            ptr.add(3).write(3);\n            Ok(4)\n        })\n    }\n    .unwrap();\n    assert_eq!(data.as_slice(), &[0, 1, 2, 3]);\n\n    let mut cursor = std::io::Cursor::new(&mut data);\n    // Here we use a position larger than the actual data.\n    // So expect the data to be zero-filled.\n    cursor.set_position(6);\n    unsafe {\n        cursor.write_from(|ptr, n| {\n            assert!(n >= 4);\n            let ptr = ptr as *mut u8;\n            ptr.write(4);\n            ptr.add(1).write(5);\n            ptr.add(2).write(6);\n            ptr.add(3).write(7);\n            Ok(4)\n        })\n    }\n    .unwrap();\n\n    assert_eq!(data.as_slice(), &[0, 1, 2, 3, 0, 0, 4, 5, 6, 7]);\n}\n\n#[cfg(feature = \"std\")]\n#[test]\nfn test_simple_cycle() {\n    let mut buffer = std::vec![0u8; 256];\n    let written = zstd_safe::compress(&mut buffer, INPUT, 3).unwrap();\n    let compressed = &buffer[..written];\n\n    let mut buffer = std::vec![0u8; 256];\n    let written = zstd_safe::decompress(&mut buffer, compressed).unwrap();\n    let decompressed = &buffer[..written];\n\n    assert_eq!(INPUT, decompressed);\n}\n\n#[test]\nfn test_cctx_cycle() {\n    let mut buffer = std::vec![0u8; 256];\n    let mut cctx = 
zstd_safe::CCtx::default();\n    let written = cctx.compress(&mut buffer[..], INPUT, 1).unwrap();\n    let compressed = &buffer[..written];\n\n    let mut dctx = zstd_safe::DCtx::default();\n    let mut buffer = std::vec![0u8; 256];\n    let written = dctx.decompress(&mut buffer[..], compressed).unwrap();\n    let decompressed = &buffer[..written];\n\n    assert_eq!(INPUT, decompressed);\n}\n\n#[test]\nfn test_dictionary() {\n    // Prepare some content to train the dictionary.\n    let bytes = LONG_CONTENT.as_bytes();\n    let line_sizes: Vec<usize> =\n        LONG_CONTENT.lines().map(|line| line.len() + 1).collect();\n\n    // Train the dictionary\n    let mut dict_buffer = std::vec![0u8; 100_000];\n    let written =\n        zstd_safe::train_from_buffer(&mut dict_buffer[..], bytes, &line_sizes)\n            .unwrap();\n    let dict_buffer = &dict_buffer[..written];\n\n    // Create pre-hashed dictionaries for (de)compression\n    let cdict = zstd_safe::create_cdict(dict_buffer, 3);\n    let ddict = zstd_safe::create_ddict(dict_buffer);\n\n    // Compress data\n    let mut cctx = zstd_safe::CCtx::default();\n    cctx.ref_cdict(&cdict).unwrap();\n\n    let mut buffer = std::vec![0u8; 1024 * 1024];\n    // First, try to compress without a dict\n    let big_written = zstd_safe::compress(&mut buffer[..], bytes, 3).unwrap();\n\n    let written = cctx\n        .compress2(&mut buffer[..], bytes)\n        .map_err(zstd_safe::get_error_name)\n        .unwrap();\n\n    assert!(big_written > written);\n    let compressed = &buffer[..written];\n\n    // Decompress data\n    let mut dctx = zstd_safe::DCtx::default();\n    dctx.ref_ddict(&ddict).unwrap();\n\n    let mut buffer = std::vec![0u8; 1024 * 1024];\n    let written = dctx\n        .decompress(&mut buffer[..], compressed)\n        .map_err(zstd_safe::get_error_name)\n        .unwrap();\n    let decompressed = &buffer[..written];\n\n    // Profit!\n    assert_eq!(bytes, decompressed);\n}\n\n#[test]\nfn test_checksum() 
{\n    let mut buffer = std::vec![0u8; 256];\n    let mut cctx = zstd_safe::CCtx::default();\n    cctx.set_parameter(zstd_safe::CParameter::ChecksumFlag(true))\n        .unwrap();\n    let written = cctx.compress2(&mut buffer[..], INPUT).unwrap();\n    let compressed = &mut buffer[..written];\n\n    let mut dctx = zstd_safe::DCtx::default();\n    let mut buffer = std::vec![0u8; 1024*1024];\n    let written = dctx\n        .decompress(&mut buffer[..], compressed)\n        .map_err(zstd_safe::get_error_name)\n        .unwrap();\n    let decompressed = &buffer[..written];\n\n    assert_eq!(INPUT, decompressed);\n\n    // Now try again with some corruption\n    // TODO: Find a mutation that _wouldn't_ be detected without checksums.\n    // (Most naive changes already trigger a \"corrupt block\" error.)\n    if let Some(last) = compressed.last_mut() {\n        *last = last.saturating_sub(1);\n    }\n    let err = dctx\n        .decompress(&mut buffer[..], compressed)\n        .map_err(zstd_safe::get_error_name)\n        .err()\n        .unwrap();\n    // The error message will complain about the checksum.\n    assert!(err.contains(\"checksum\"));\n}\n\n#[cfg(all(feature = \"experimental\", feature = \"std\"))]\n#[test]\nfn test_upper_bound() {\n    let mut buffer = std::vec![0u8; 256];\n\n    assert!(zstd_safe::decompress_bound(&buffer).is_err());\n\n    let written = zstd_safe::compress(&mut buffer, INPUT, 3).unwrap();\n    let compressed = &buffer[..written];\n\n    assert_eq!(\n        zstd_safe::decompress_bound(&compressed),\n        Ok(INPUT.len() as u64)\n    );\n}\n\n#[cfg(feature = \"seekable\")]\n#[test]\nfn test_seekable_cycle() {\n    let seekable_archive = new_seekable_archive(INPUT);\n    let mut seekable = crate::seekable::Seekable::create();\n    seekable\n        .init_buff(&seekable_archive)\n        .map_err(zstd_safe::get_error_name)\n        .unwrap();\n\n    decompress_seekable(&mut seekable);\n\n    // Check that the archive can also be 
decompressed by a regular function\n    let mut buffer = std::vec![0u8; 256];\n    let written = zstd_safe::decompress(&mut buffer[..], &seekable_archive)\n        .map_err(zstd_safe::get_error_name)\n        .unwrap();\n    let decompressed = &buffer[..written];\n    assert_eq!(INPUT, decompressed);\n\n    // Trigger FrameIndexTooLargeError\n    let frame_index = seekable.num_frames() + 1;\n    assert_eq!(\n        seekable.frame_compressed_offset(frame_index).unwrap_err(),\n        crate::seekable::FrameIndexTooLargeError\n    );\n}\n\n#[cfg(feature = \"seekable\")]\n#[test]\nfn test_seekable_seek_table() {\n    use crate::seekable::{FrameIndexTooLargeError, SeekTable, Seekable};\n\n    let seekable_archive = new_seekable_archive(INPUT);\n    let mut seekable = Seekable::create();\n\n    // Assert that creating a SeekTable from an uninitialized seekable errors.\n    // This led to segfaults with zstd versions prior v1.5.7\n    assert!(SeekTable::try_from_seekable(&seekable).is_err());\n\n    seekable\n        .init_buff(&seekable_archive)\n        .map_err(zstd_safe::get_error_name)\n        .unwrap();\n\n    // Try to create a seek table from the seekable\n    let seek_table =\n        { SeekTable::try_from_seekable(&seekable).unwrap() };\n\n    // Seekable and seek table should return the same results\n    assert_eq!(seekable.num_frames(), seek_table.num_frames());\n    assert_eq!(\n        seekable.frame_compressed_offset(2).unwrap(),\n        seek_table.frame_compressed_offset(2).unwrap()\n    );\n    assert_eq!(\n        seekable.frame_decompressed_offset(2).unwrap(),\n        seek_table.frame_decompressed_offset(2).unwrap()\n    );\n    assert_eq!(\n        seekable.frame_compressed_size(2).unwrap(),\n        seek_table.frame_compressed_size(2).unwrap()\n    );\n    assert_eq!(\n        seekable.frame_decompressed_size(2).unwrap(),\n        seek_table.frame_decompressed_size(2).unwrap()\n    );\n\n    // Trigger FrameIndexTooLargeError\n    let frame_index 
= seekable.num_frames() + 1;\n    assert_eq!(\n        seek_table.frame_compressed_offset(frame_index).unwrap_err(),\n        FrameIndexTooLargeError\n    );\n}\n\n#[cfg(all(feature = \"std\", feature = \"seekable\"))]\n#[test]\nfn test_seekable_advanced_cycle() {\n    use crate::seekable::Seekable;\n    use std::{boxed::Box, io::Cursor};\n\n    // Wrap the archive in a cursor that implements Read and Seek,\n    // a file would also work\n    let seekable_archive = Cursor::new(new_seekable_archive(INPUT));\n    let mut seekable = Seekable::create()\n        .init_advanced(Box::new(seekable_archive))\n        .map_err(zstd_safe::get_error_name)\n        .unwrap();\n\n    decompress_seekable(&mut seekable);\n}\n\n#[cfg(feature = \"seekable\")]\nfn new_seekable_archive(input: &[u8]) -> Vec<u8> {\n    use crate::{seekable::SeekableCStream, InBuffer, OutBuffer};\n\n    // Make sure the buffer is big enough\n    // The buffer needs to be bigger than the uncompressed data here as the seekable archive has\n    // more meta data than actual compressed data because the input is really small and we use\n    // a max_frame_size of 64, which is way too small for real-world usages.\n    let mut buffer = std::vec![0u8; 512];\n    let mut cstream = SeekableCStream::create();\n    cstream\n        .init(3, true, 64)\n        .map_err(zstd_safe::get_error_name)\n        .unwrap();\n    let mut in_buffer = InBuffer::around(input);\n    let mut out_buffer = OutBuffer::around(&mut buffer[..]);\n\n    // This could get stuck if the buffer is too small\n    while in_buffer.pos() < in_buffer.src.len() {\n        cstream\n            .compress_stream(&mut out_buffer, &mut in_buffer)\n            .map_err(zstd_safe::get_error_name)\n            .unwrap();\n    }\n\n    // Make sure everything is flushed to out_buffer\n    loop {\n        if cstream\n            .end_stream(&mut out_buffer)\n            .map_err(zstd_safe::get_error_name)\n            .unwrap()\n            == 0\n        {\n    
        break;\n        }\n    }\n\n    Vec::from(out_buffer.as_slice())\n}\n\n#[cfg(feature = \"seekable\")]\nfn decompress_seekable(seekable: &mut crate::seekable::Seekable<'_>) {\n    // Make the buffer as big as max_frame_size so it can hold a complete frame\n    let mut buffer = std::vec![0u8; 64];\n    // Decompress only the first frame\n    let written = seekable\n        .decompress(&mut buffer[..], 0)\n        .map_err(zstd_safe::get_error_name)\n        .unwrap();\n    let decompressed = &buffer[..written];\n    assert!(INPUT.starts_with(decompressed));\n    assert_eq!(decompressed.len(), 64);\n\n    // Make the buffer big enough to hold the complete input\n    let mut buffer = std::vec![0u8; 256];\n    // Decompress everything\n    let written = seekable\n        .decompress(&mut buffer[..], 0)\n        .map_err(zstd_safe::get_error_name)\n        .unwrap();\n    let decompressed = &buffer[..written];\n    assert_eq!(INPUT, decompressed);\n}\n"
  },
  {
    "path": "zstd-safe/update_consts.sh",
    "content": "#!/bin/bash\ndeclare -A varTypes\nvarTypes[CLEVEL_DEFAULT]=CompressionLevel\nvarTypes[CONTENTSIZE_UNKNOWN]=u64\nvarTypes[CONTENTSIZE_ERROR]=u64\n# This is originally (0ULL-2) and gets falsely translated to -2 by bindgen.\n# Casting it to u64 \"fixes\" this to the right number\nvarTypes[SEEKABLE_FRAMEINDEX_TOOLARGE]=u64\n\nheader() {\n    echo \"// This file has been generated by $0\"\n}\n\nfetch_constants() {\n    rg 'pub const ZSTD_' $1 | while read pub const var vartype eq value; do\n        vname=${var/:}\n        newname=${vname/ZSTD_}\n        vt=${varTypes[$newname]}\n        if [ -z \"$vt\" ]\n        then\n            echo \"pub const ${newname}: $vartype = zstd_sys::${vname};\"\n        else\n            echo \"pub const ${newname}: $vt = zstd_sys::${vname} as $vt;\"\n        fi\n    done | sort\n}\n\nconstants=$(fetch_constants zstd-sys/src/bindings_zstd.rs)\nheader > src/constants.rs\necho \"$constants\" >> src/constants.rs\n\n(\n    header\n    comm -23 <(fetch_constants zstd-sys/src/bindings_zstd_experimental.rs) <(echo \"$constants\")\n) > src/constants_experimental.rs\n\n(\n    header\n    comm -23 <(fetch_constants zstd-sys/src/bindings_zstd_seekable.rs) <(echo \"$constants\")\n) > src/constants_seekable.rs\n"
  },
  {
    "path": "zstd-safe/zstd-sys/Cargo.toml",
    "content": "[package]\nauthors = [\"Alexandre Bury <alexandre.bury@gmail.com>\"]\nbuild = \"build.rs\"\ncategories = [\n    \"api-bindings\",\n    \"compression\",\n]\ndescription = \"Low-level bindings for the zstd compression library.\"\nkeywords = [\n    \"zstd\",\n    \"zstandard\",\n    \"compression\",\n]\nlicense = \"BSD-3-Clause\"\nlinks = \"zstd\"\nname = \"zstd-sys\"\nreadme = \"Readme.md\"\nrepository = \"https://github.com/gyscos/zstd-rs\"\nversion = \"2.0.16+zstd.1.5.7\"\nedition = \"2018\"\nrust-version = \"1.64\"\n\n# Use include instead of exclude, as a (temporary)\n# workaround for https://github.com/rust-lang/cargo/issues/9555\ninclude = [\n    \"/LICENSE*\",\n    \"!/*.sh\",\n    \"/build.rs\",\n    \"/*.h\",\n    \"/src/\",\n    \"/wasm-shim/**/*.h\",\n    \"/zstd/LICENSE\",\n    \"/zstd/COPYING\",\n    \"/zstd/lib/**/*.c\",\n    \"/zstd/lib/**/*.h\",\n    \"/zstd/lib/**/*.S\",\n    \"/zstd/contrib/seekable_format/*.c\",\n    \"/zstd/contrib/seekable_format/*.h\",\n]\n# exclude = [\n#     \"zstd\",\n#     \"!zstd/LICENSE\",\n#     \"!zstd/COPYING\",\n#     \"!zstd/lib/**/**.h\",\n#     \"!zstd/lib/**/**.c\",\n# ]\n\n[package.metadata.docs.rs]\nfeatures = [\"experimental\"]\n\n[lib]\ndoctest = false  # Documentation is for C code, good luck testing that.\n\n[build-dependencies.bindgen]\noptional = true\nversion = \"0.72\"\ndefault-features = false\nfeatures = [\"runtime\"]\n\n[build-dependencies.pkg-config]\nversion = \"0.3.28\"\n\n[build-dependencies.cc]\nversion = \"1.0.45\"\nfeatures = [\"parallel\"]\n\n[features]\ndefault = [\"legacy\", \"zdict_builder\", \"bindgen\"]\n\ndebug = [] # Enable zstd debug logs\nexperimental = [] # Expose experimental ZSTD API\nlegacy = [] # Enable legacy ZSTD support (for versions < zstd-0.8)\nnon-cargo = [] # Silence cargo-specific build flags\npkg-config = [] # Use pkg-config to build the zstd C library.\nstd = [] # Deprecated: we never use types from std.\nzstdmt = [] # Enable multi-thread support (with 
pthread)\nthin = [] # Optimize binary by size\nno_asm = [] # Disable ASM files (only on amd64 for decompression)\nzdict_builder = [] # Enable dictionary building (dictionary _using_ is always supported).\nno_wasm_shim = [] # Disable wasm shims (in case your wasm toolchain includes a C stdlib).\nseekable = [] # Enable support of the seekable format\n\n# These two are for cross-language LTO.\n# Will only work if `clang` is used to build the C library.\nfat-lto = [] # Enable fat-lto, will override thin-lto if specified\nthin-lto = [] # Enable thin-lto, will fallback to fat-lto if not supported\n\n[lints.rust]\nnon_upper_case_globals = \"allow\"\n"
  },
  {
    "path": "zstd-safe/zstd-sys/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2026, Alexandre Bury\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n   contributors may be used to endorse or promote products derived from\n   this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "zstd-safe/zstd-sys/LICENSE.BSD-3-Clause",
    "content": "The auto-generated bindings are under the 3-clause BSD license:\n\nBSD License\n\nFor Zstandard software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n * Neither the name Facebook, nor Meta, nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "zstd-safe/zstd-sys/Readme.md",
    "content": "# zstd-sys\n\nThis is the low-level auto-generated binding to the [zstd] library.\nYou probably don't want to use this library directly; instead, look at [zstd-rs] or [zstd-safe].\n\n# Compile it yourself\n\n`zstd` is included as a submodule. To get everything during your clone, use:\n\n```\ngit clone https://github.com/gyscos/zstd-rs --recursive\n```\n\nOr, if you cloned it without the `--recursive` flag,\ncall this from inside the repository:\n\n```\ngit submodule update --init\n```\n\nThen, running `cargo build` in this directory should\ntake care of building the C library and linking to it.\n\n# Build-time bindgen\n\nThis library includes a pre-generated `bindings.rs` file.\nYou can also generate new bindings at build-time, using the `bindgen` feature:\n\n```\ncargo build --features bindgen\n```\n\n[zstd]: https://github.com/facebook/zstd\n[zstd-rs]: https://github.com/gyscos/zstd-rs\n[zstd-safe]: https://github.com/gyscos/zstd-rs/tree/main/zstd-safe\n"
  },
  {
    "path": "zstd-safe/zstd-sys/build.rs",
    "content": "use std::ffi::OsStr;\nuse std::path::{Path, PathBuf};\nuse std::{env, fmt, fs};\n\n#[cfg(feature = \"bindgen\")]\nfn generate_bindings(defs: Vec<&str>, headerpaths: Vec<PathBuf>) {\n    use bindgen::RustTarget;\n\n    let bindings = bindgen::Builder::default().header(\"zstd.h\");\n\n    #[cfg(feature = \"zdict_builder\")]\n    let bindings = bindings.header(\"zdict.h\");\n\n    #[cfg(feature = \"seekable\")]\n    let bindings = bindings.header(\"zstd_seekable.h\");\n\n    let bindings = bindings\n        .layout_tests(false)\n        .blocklist_type(\"max_align_t\")\n        .size_t_is_usize(true)\n        .rust_target(\n            RustTarget::stable(64, 0)\n                .ok()\n                .expect(\"Could not get 1.64.0 version\"),\n        )\n        .use_core()\n        .rustified_enum(\".*\")\n        .clang_args(\n            headerpaths\n                .into_iter()\n                .map(|path| format!(\"-I{}\", path.display())),\n        )\n        .clang_args(defs.into_iter().map(|def| format!(\"-D{}\", def)));\n\n    #[cfg(feature = \"experimental\")]\n    let bindings = bindings\n        .clang_arg(\"-DZSTD_STATIC_LINKING_ONLY\")\n        .clang_arg(\"-DZDICT_STATIC_LINKING_ONLY\")\n        .clang_arg(\"-DZSTD_RUST_BINDINGS_EXPERIMENTAL\");\n\n    #[cfg(feature = \"seekable\")]\n    let bindings = bindings.blocklist_function(\"ZSTD_seekable_initFile\");\n\n    let bindings = bindings.generate().expect(\"Unable to generate bindings\");\n\n    let out_path = PathBuf::from(env::var_os(\"OUT_DIR\").unwrap());\n    bindings\n        .write_to_file(out_path.join(\"bindings.rs\"))\n        .expect(\"Could not write bindings\");\n}\n\n#[cfg(not(feature = \"bindgen\"))]\nfn generate_bindings(_: Vec<&str>, _: Vec<PathBuf>) {}\n\nfn pkg_config() -> (Vec<&'static str>, Vec<PathBuf>) {\n    let library = pkg_config::Config::new()\n        .statik(true)\n        .cargo_metadata(!cfg!(feature = \"non-cargo\"))\n        .probe(\"libzstd\")\n        
.expect(\"Can't probe for zstd in pkg-config\");\n    (vec![\"PKG_CONFIG\"], library.include_paths)\n}\n\n#[cfg(not(feature = \"legacy\"))]\nfn set_legacy(_config: &mut cc::Build) {}\n\n#[cfg(feature = \"legacy\")]\nfn set_legacy(config: &mut cc::Build) {\n    config.define(\"ZSTD_LEGACY_SUPPORT\", Some(\"1\"));\n    config.include(\"zstd/lib/legacy\");\n}\n\n#[cfg(feature = \"zstdmt\")]\nfn set_pthread(config: &mut cc::Build) {\n    config.flag(\"-pthread\");\n}\n\n#[cfg(not(feature = \"zstdmt\"))]\nfn set_pthread(_config: &mut cc::Build) {}\n\n#[cfg(feature = \"zstdmt\")]\nfn enable_threading(config: &mut cc::Build) {\n    config.define(\"ZSTD_MULTITHREAD\", Some(\"\"));\n}\n\n#[cfg(not(feature = \"zstdmt\"))]\nfn enable_threading(_config: &mut cc::Build) {}\n\n/// This function would find the first flag in `flags` that is supported\n/// and add that to `config`.\n#[allow(dead_code)]\nfn flag_if_supported_with_fallbacks(config: &mut cc::Build, flags: &[&str]) {\n    let option = flags\n        .iter()\n        .find(|flag| config.is_flag_supported(flag).unwrap_or_default());\n\n    if let Some(flag) = option {\n        config.flag(flag);\n    }\n}\n\nfn compile_zstd() {\n    let mut config = cc::Build::new();\n\n    // Search the following directories for C files to add to the compilation.\n    for dir in &[\n        \"zstd/lib/common\",\n        \"zstd/lib/compress\",\n        \"zstd/lib/decompress\",\n        #[cfg(feature = \"seekable\")]\n        \"zstd/contrib/seekable_format\",\n        #[cfg(feature = \"zdict_builder\")]\n        \"zstd/lib/dictBuilder\",\n        #[cfg(feature = \"legacy\")]\n        \"zstd/lib/legacy\",\n    ] {\n        let mut entries: Vec<_> = fs::read_dir(dir)\n            .unwrap()\n            .map(Result::unwrap)\n            .filter_map(|entry| {\n                let filename = entry.file_name();\n\n                if Path::new(&filename).extension() == Some(OsStr::new(\"c\"))\n                    // Skip xxhash*.c files: since 
we are using the \"PRIVATE API\"\n                    // mode, it will be inlined in the headers.\n                    && !filename.to_string_lossy().contains(\"xxhash\")\n                {\n                    Some(entry.path())\n                } else {\n                    None\n                }\n            })\n            .collect();\n        entries.sort();\n\n        config.files(entries);\n    }\n\n    // Either include ASM files, or disable ASM entirely.\n    // Also disable it on windows, apparently it doesn't do well with these .S files at the moment.\n    if cfg!(feature = \"no_asm\") || std::env::var(\"CARGO_CFG_WINDOWS\").is_ok() {\n        config.define(\"ZSTD_DISABLE_ASM\", Some(\"\"));\n    } else {\n        config.file(\"zstd/lib/decompress/huf_decompress_amd64.S\");\n    }\n\n    // List out the WASM targets that need wasm-shim.\n    // Note that Emscripten already provides its own C standard library so\n    // wasm32-unknown-emscripten should not be included here.\n    // See: https://github.com/gyscos/zstd-rs/pull/209\n    let need_wasm_shim = !cfg!(feature = \"no_wasm_shim\")\n        && env::var(\"TARGET\").map_or(false, |target| {\n            target == \"wasm32-unknown-unknown\"\n                || target.starts_with(\"wasm32-wasi\")\n        });\n\n    if need_wasm_shim {\n        cargo_print(&\"rerun-if-changed=wasm-shim/stdlib.h\");\n        cargo_print(&\"rerun-if-changed=wasm-shim/string.h\");\n\n        config.include(\"wasm-shim/\");\n    }\n\n    // Some extra parameters\n    config.include(\"zstd/lib/\");\n    config.include(\"zstd/lib/common\");\n    config.warnings(false);\n\n    config.define(\"ZSTD_LIB_DEPRECATED\", Some(\"0\"));\n\n    config\n        .flag_if_supported(\"-ffunction-sections\")\n        .flag_if_supported(\"-fdata-sections\")\n        .flag_if_supported(\"-fmerge-all-constants\");\n\n    if cfg!(feature = \"fat-lto\") {\n        config.flag_if_supported(\"-flto\");\n    } else if cfg!(feature = \"thin-lto\") 
{\n        flag_if_supported_with_fallbacks(\n            &mut config,\n            &[\"-flto=thin\", \"-flto\"],\n        );\n    }\n\n    #[cfg(feature = \"thin\")]\n    {\n        // Here we try to build a lib as thin/small as possible.\n        // We cannot use ZSTD_LIB_MINIFY since it is only\n        // used in Makefile to define other options.\n\n        config\n            .define(\"HUF_FORCE_DECOMPRESS_X1\", Some(\"1\"))\n            .define(\"ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT\", Some(\"1\"))\n            .define(\"ZSTD_NO_INLINE\", Some(\"1\"))\n            // removes the error messages that are\n            // otherwise returned by ZSTD_getErrorName\n            .define(\"ZSTD_STRIP_ERROR_STRINGS\", Some(\"1\"));\n\n        // Disable use of BMI2 instructions since it involves runtime checking\n        // of the feature and fallback if no BMI2 instruction is detected.\n        config.define(\"DYNAMIC_BMI2\", Some(\"0\"));\n\n        // Disable support for all legacy formats\n        #[cfg(not(feature = \"legacy\"))]\n        config.define(\"ZSTD_LEGACY_SUPPORT\", Some(\"0\"));\n\n        config.opt_level_str(\"z\");\n    }\n\n    // Hide symbols from resulting library,\n    // so we can be used with another zstd-linking lib.\n    // See https://github.com/gyscos/zstd-rs/issues/58\n    if ! 
cfg!(target_env = \"msvc\") {\n        config.flag(\"-fvisibility=hidden\");\n    }\n    config.define(\"XXH_PRIVATE_API\", Some(\"\"));\n    config.define(\"ZSTDLIB_VISIBILITY\", Some(\"\"));\n    #[cfg(feature = \"zdict_builder\")]\n    config.define(\"ZDICTLIB_VISIBILITY\", Some(\"\"));\n    config.define(\"ZSTDERRORLIB_VISIBILITY\", Some(\"\"));\n\n    // https://github.com/facebook/zstd/blob/d69d08ed6c83563b57d98132e1e3f2487880781e/lib/common/debug.h#L60\n    /* recommended values for DEBUGLEVEL :\n     * 0 : release mode, no debug, all run-time checks disabled\n     * 1 : enables assert() only, no display\n     * 2 : reserved, for currently active debug path\n     * 3 : events once per object lifetime (CCtx, CDict, etc.)\n     * 4 : events once per frame\n     * 5 : events once per block\n     * 6 : events once per sequence (verbose)\n     * 7+: events at every position (*very* verbose)\n     */\n    #[cfg(feature = \"debug\")]\n    if !need_wasm_shim {\n        config.define(\"DEBUGLEVEL\", Some(\"5\"));\n    }\n\n    set_pthread(&mut config);\n    set_legacy(&mut config);\n    enable_threading(&mut config);\n\n    // Compile!\n    config.compile(\"libzstd.a\");\n\n    let src = env::current_dir().unwrap().join(\"zstd\").join(\"lib\");\n    let dst = PathBuf::from(env::var_os(\"OUT_DIR\").unwrap());\n    let include = dst.join(\"include\");\n    fs::create_dir_all(&include).unwrap();\n    fs::copy(src.join(\"zstd.h\"), include.join(\"zstd.h\")).unwrap();\n    fs::copy(src.join(\"zstd_errors.h\"), include.join(\"zstd_errors.h\"))\n        .unwrap();\n    #[cfg(feature = \"zdict_builder\")]\n    fs::copy(src.join(\"zdict.h\"), include.join(\"zdict.h\")).unwrap();\n    cargo_print(&format_args!(\"root={}\", dst.display()));\n}\n\n/// Print a line for cargo.\n///\n/// If non-cargo is set, do not print anything.\nfn cargo_print(content: &dyn fmt::Display) {\n    if cfg!(not(feature = \"non-cargo\")) {\n        println!(\"cargo:{}\", content);\n    }\n}\n\nfn 
main() {\n    cargo_print(&\"rerun-if-env-changed=ZSTD_SYS_USE_PKG_CONFIG\");\n\n    let target_arch =\n        std::env::var(\"CARGO_CFG_TARGET_ARCH\").unwrap_or_default();\n    let target_os = std::env::var(\"CARGO_CFG_TARGET_OS\").unwrap_or_default();\n\n    if target_arch == \"wasm32\" || target_os == \"hermit\" {\n        cargo_print(&\"rustc-cfg=feature=\\\"std\\\"\");\n    }\n\n    // println!(\"cargo:rustc-link-lib=zstd\");\n    let (defs, headerpaths) = if cfg!(feature = \"pkg-config\")\n        || env::var_os(\"ZSTD_SYS_USE_PKG_CONFIG\").is_some()\n    {\n        pkg_config()\n    } else {\n        if !Path::new(\"zstd/lib\").exists() {\n            panic!(\"Folder 'zstd/lib' does not exists. Maybe you forgot to clone the 'zstd' submodule?\");\n        }\n\n        let manifest_dir = PathBuf::from(\n            env::var_os(\"CARGO_MANIFEST_DIR\")\n                .expect(\"Manifest dir is always set by cargo\"),\n        );\n\n        compile_zstd();\n        (vec![], vec![manifest_dir.join(\"zstd/lib\")])\n    };\n\n    let includes: Vec<_> = headerpaths\n        .iter()\n        .map(|p| p.display().to_string())\n        .collect();\n    cargo_print(&format_args!(\"include={}\", includes.join(\";\")));\n\n    generate_bindings(defs, headerpaths);\n}\n"
  },
  {
    "path": "zstd-safe/zstd-sys/examples/it_work.rs",
    "content": "use std::convert::TryInto;\n\n#[no_mangle]\npub extern \"C\" fn zstd_version() -> u32 {\n    unsafe { zstd_sys::ZSTD_versionNumber() }\n}\n\nmacro_rules! zstd_check {\n    ( $ret:expr ) => {{\n        let ret = $ret;\n        let error_code = unsafe { zstd_sys::ZSTD_isError(ret) };\n        assert_eq!(error_code, 0);\n    }};\n}\n\n#[no_mangle]\npub extern \"C\" fn test_compress() -> bool {\n    let fbuf = include_bytes!(\"../Cargo.toml\");\n\n    let cbufsize = unsafe { zstd_sys::ZSTD_compressBound(fbuf.len()) };\n    let mut cbuf = vec![0; cbufsize];\n\n    let csize = unsafe {\n        zstd_sys::ZSTD_compress(\n            cbuf.as_mut_ptr().cast(),\n            cbuf.len(),\n            fbuf.as_ptr().cast(),\n            fbuf.len(),\n            1,\n        )\n    };\n    zstd_check!(csize);\n    let cbuf = &cbuf[..csize];\n\n    let rsize = unsafe {\n        zstd_sys::ZSTD_getFrameContentSize(cbuf.as_ptr().cast(), cbuf.len())\n    };\n    let rsize = rsize.try_into().unwrap();\n    let mut rbuf = vec![0; rsize];\n\n    let dsize = unsafe {\n        zstd_sys::ZSTD_decompress(\n            rbuf.as_mut_ptr().cast(),\n            rbuf.len(),\n            cbuf.as_ptr().cast(),\n            cbuf.len(),\n        )\n    };\n    zstd_check!(dsize);\n    assert_eq!(dsize, rsize);\n\n    &fbuf[..] == &rbuf[..]\n}\n\nfn main() {}\n"
  },
  {
    "path": "zstd-safe/zstd-sys/src/bindings_zdict.rs",
    "content": "/*\nThis file is auto-generated from the public API of the zstd library.\nIt is released under the same BSD license.\n\nBSD License\n\nFor Zstandard software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n * Neither the name Facebook, nor Meta, nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n/* automatically generated by rust-bindgen 0.71.1 */\n\nextern \"C\" {\n    #[doc = \" ZDICT_trainFromBuffer():\\n  Train a dictionary from an array of samples.\\n  Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,\\n  f=20, and accel=1.\\n  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\\n  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\\n  The resulting dictionary will be saved into `dictBuffer`.\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n  Note:  Dictionary training will fail if there are not enough samples to construct a\\n         dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).\\n         If dictionary training fails, you should use zstd without a dictionary, as the dictionary\\n         would've been ineffective anyways. 
If you believe your samples would benefit from a dictionary\\n         please open an issue with details, and we can look into it.\\n  Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.\\n  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\\n        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\\n        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\\n        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\"]\n    pub fn ZDICT_trainFromBuffer(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZDICT_params_t {\n    #[doc = \"< optimize for a specific zstd compression level; 0 means default\"]\n    pub compressionLevel: ::core::ffi::c_int,\n    #[doc = \"< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug;\"]\n    pub notificationLevel: ::core::ffi::c_uint,\n    #[doc = \"< force dictID value; 0 means auto mode (32-bits random value)\\n   NOTE: The zstd format reserves some dictionary IDs for future use.\\n         You may use them in private settings, but be warned that they\\n         may be used by zstd in a public dictionary registry in the future.\\n         These dictionary IDs are:\\n           - low range  : <= 32767\\n           - high range : >= (2^31)\"]\n    pub dictID: ::core::ffi::c_uint,\n}\nextern \"C\" {\n    #[doc = \" ZDICT_finalizeDictionary():\\n Given a custom content as a basis for dictionary, and a set of samples,\\n finalize dictionary by adding headers and statistics according to the zstd\\n dictionary format.\\n\\n Samples must be stored concatenated in a flat buffer `samplesBuffer`,\\n supplied with 
an array of sizes `samplesSizes`, providing the size of each\\n sample in order. The samples are used to construct the statistics, so they\\n should be representative of what you will compress with this dictionary.\\n\\n The compression level can be set in `parameters`. You should pass the\\n compression level you expect to use in production. The statistics for each\\n compression level differ, so tuning the dictionary for the compression level\\n can help quite a bit.\\n\\n You can set an explicit dictionary ID in `parameters`, or allow us to pick\\n a random dictionary ID for you, but we can't guarantee no collisions.\\n\\n The dstDictBuffer and the dictContent may overlap, and the content will be\\n appended to the end of the header. If the header + the content doesn't fit in\\n maxDictSize the beginning of the content is truncated to make room, since it\\n is presumed that the most profitable content is at the end of the dictionary,\\n since that is the cheapest to reference.\\n\\n `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN).\\n\\n @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`),\\n          or an error code, which can be tested by ZDICT_isError().\\n Note: ZDICT_finalizeDictionary() will push notifications into stderr if\\n       instructed to, using notificationLevel>0.\\n NOTE: This function currently may fail in several edge cases including:\\n         * Not enough samples\\n         * Samples are uncompressible\\n         * Samples are all exactly the same\"]\n    pub fn ZDICT_finalizeDictionary(\n        dstDictBuffer: *mut ::core::ffi::c_void,\n        maxDictSize: usize,\n        dictContent: *const ::core::ffi::c_void,\n        dictContentSize: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: ZDICT_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZDICT_getDictID(\n        dictBuffer: 
*const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZDICT_getDictHeaderSize(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZDICT_isError(errorCode: usize) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZDICT_getErrorName(errorCode: usize) -> *const ::core::ffi::c_char;\n}\n"
  },
  {
    "path": "zstd-safe/zstd-sys/src/bindings_zdict_experimental.rs",
    "content": "/*\nThis file is auto-generated from the public API of the zstd library.\nIt is released under the same BSD license.\n\nBSD License\n\nFor Zstandard software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n * Neither the name Facebook, nor Meta, nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n/* automatically generated by rust-bindgen 0.71.1 */\n\npub const ZDICT_DICTSIZE_MIN: u32 = 256;\npub const ZDICT_CONTENTSIZE_MIN: u32 = 128;\nextern \"C\" {\n    #[doc = \" ZDICT_trainFromBuffer():\\n  Train a dictionary from an array of samples.\\n  Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,\\n  f=20, and accel=1.\\n  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\\n  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\\n  The resulting dictionary will be saved into `dictBuffer`.\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n  Note:  Dictionary training will fail if there are not enough samples to construct a\\n         dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).\\n         If dictionary training fails, you should use zstd without a dictionary, as the dictionary\\n         would've been ineffective anyways. 
If you believe your samples would benefit from a dictionary\\n         please open an issue with details, and we can look into it.\\n  Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.\\n  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\\n        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\\n        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\\n        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\"]\n    pub fn ZDICT_trainFromBuffer(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZDICT_params_t {\n    #[doc = \"< optimize for a specific zstd compression level; 0 means default\"]\n    pub compressionLevel: ::core::ffi::c_int,\n    #[doc = \"< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug;\"]\n    pub notificationLevel: ::core::ffi::c_uint,\n    #[doc = \"< force dictID value; 0 means auto mode (32-bits random value)\\n   NOTE: The zstd format reserves some dictionary IDs for future use.\\n         You may use them in private settings, but be warned that they\\n         may be used by zstd in a public dictionary registry in the future.\\n         These dictionary IDs are:\\n           - low range  : <= 32767\\n           - high range : >= (2^31)\"]\n    pub dictID: ::core::ffi::c_uint,\n}\nextern \"C\" {\n    #[doc = \" ZDICT_finalizeDictionary():\\n Given a custom content as a basis for dictionary, and a set of samples,\\n finalize dictionary by adding headers and statistics according to the zstd\\n dictionary format.\\n\\n Samples must be stored concatenated in a flat buffer `samplesBuffer`,\\n supplied with 
an array of sizes `samplesSizes`, providing the size of each\\n sample in order. The samples are used to construct the statistics, so they\\n should be representative of what you will compress with this dictionary.\\n\\n The compression level can be set in `parameters`. You should pass the\\n compression level you expect to use in production. The statistics for each\\n compression level differ, so tuning the dictionary for the compression level\\n can help quite a bit.\\n\\n You can set an explicit dictionary ID in `parameters`, or allow us to pick\\n a random dictionary ID for you, but we can't guarantee no collisions.\\n\\n The dstDictBuffer and the dictContent may overlap, and the content will be\\n appended to the end of the header. If the header + the content doesn't fit in\\n maxDictSize the beginning of the content is truncated to make room, since it\\n is presumed that the most profitable content is at the end of the dictionary,\\n since that is the cheapest to reference.\\n\\n `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN).\\n\\n @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`),\\n          or an error code, which can be tested by ZDICT_isError().\\n Note: ZDICT_finalizeDictionary() will push notifications into stderr if\\n       instructed to, using notificationLevel>0.\\n NOTE: This function currently may fail in several edge cases including:\\n         * Not enough samples\\n         * Samples are uncompressible\\n         * Samples are all exactly the same\"]\n    pub fn ZDICT_finalizeDictionary(\n        dstDictBuffer: *mut ::core::ffi::c_void,\n        maxDictSize: usize,\n        dictContent: *const ::core::ffi::c_void,\n        dictContentSize: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: ZDICT_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZDICT_getDictID(\n        dictBuffer: 
*const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZDICT_getDictHeaderSize(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZDICT_isError(errorCode: usize) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZDICT_getErrorName(errorCode: usize) -> *const ::core::ffi::c_char;\n}\n#[doc = \" ZDICT_cover_params_t:\\n  k and d are the only required parameters.\\n  For others, value 0 means default.\"]\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZDICT_cover_params_t {\n    pub k: ::core::ffi::c_uint,\n    pub d: ::core::ffi::c_uint,\n    pub steps: ::core::ffi::c_uint,\n    pub nbThreads: ::core::ffi::c_uint,\n    pub splitPoint: f64,\n    pub shrinkDict: ::core::ffi::c_uint,\n    pub shrinkDictMaxRegression: ::core::ffi::c_uint,\n    pub zParams: ZDICT_params_t,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZDICT_fastCover_params_t {\n    pub k: ::core::ffi::c_uint,\n    pub d: ::core::ffi::c_uint,\n    pub f: ::core::ffi::c_uint,\n    pub steps: ::core::ffi::c_uint,\n    pub nbThreads: ::core::ffi::c_uint,\n    pub splitPoint: f64,\n    pub accel: ::core::ffi::c_uint,\n    pub shrinkDict: ::core::ffi::c_uint,\n    pub shrinkDictMaxRegression: ::core::ffi::c_uint,\n    pub zParams: ZDICT_params_t,\n}\nextern \"C\" {\n    #[doc = \" ZDICT_trainFromBuffer_cover():\\n  Train a dictionary from an array of samples using the COVER algorithm.\\n  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\\n  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\\n  The resulting dictionary will be saved into `dictBuffer`.\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n          See ZDICT_trainFromBuffer() for details on failure modes.\\n  Note: 
ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.\\n  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\\n        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\\n        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\\n        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\"]\n    pub fn ZDICT_trainFromBuffer_cover(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: ZDICT_cover_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZDICT_optimizeTrainFromBuffer_cover():\\n The same requirements as above hold for all the parameters except `parameters`.\\n This function tries many parameter combinations and picks the best parameters.\\n `*parameters` is filled with the best parameters found,\\n dictionary constructed with those parameters is stored in `dictBuffer`.\\n\\n All of the parameters d, k, steps are optional.\\n If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.\\n if steps is zero it defaults to its default value.\\n If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].\\n\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n          On success `*parameters` contains the parameters selected.\\n          See ZDICT_trainFromBuffer() for details on failure modes.\\n Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread.\"]\n    pub fn 
ZDICT_optimizeTrainFromBuffer_cover(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: *mut ZDICT_cover_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZDICT_trainFromBuffer_fastCover():\\n  Train a dictionary from an array of samples using a modified version of COVER algorithm.\\n  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\\n  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\\n  d and k are required.\\n  All other parameters are optional, will use default values if not provided\\n  The resulting dictionary will be saved into `dictBuffer`.\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n          See ZDICT_trainFromBuffer() for details on failure modes.\\n  Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.\\n  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\\n        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\\n        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\\n        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\"]\n    pub fn ZDICT_trainFromBuffer_fastCover(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: ZDICT_fastCover_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZDICT_optimizeTrainFromBuffer_fastCover():\\n The same requirements as above hold for all the parameters except `parameters`.\\n 
This function tries many parameter combinations (specifically, k and d combinations)\\n and picks the best parameters. `*parameters` is filled with the best parameters found,\\n dictionary constructed with those parameters is stored in `dictBuffer`.\\n All of the parameters d, k, steps, f, and accel are optional.\\n If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.\\n if steps is zero it defaults to its default value.\\n If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].\\n If f is zero, default value of 20 is used.\\n If accel is zero, default value of 1 is used.\\n\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n          On success `*parameters` contains the parameters selected.\\n          See ZDICT_trainFromBuffer() for details on failure modes.\\n Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.\"]\n    pub fn ZDICT_optimizeTrainFromBuffer_fastCover(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: *mut ZDICT_fastCover_params_t,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZDICT_legacy_params_t {\n    pub selectivityLevel: ::core::ffi::c_uint,\n    pub zParams: ZDICT_params_t,\n}\nextern \"C\" {\n    #[doc = \" ZDICT_trainFromBuffer_legacy():\\n  Train a dictionary from an array of samples.\\n  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\\n  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\\n  The resulting dictionary will be saved into `dictBuffer`.\\n `parameters` is optional and can be provided with values set to 0 
to mean \\\"default\\\".\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n          See ZDICT_trainFromBuffer() for details on failure modes.\\n  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\\n        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\\n        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\\n        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\\n  Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.\"]\n    pub fn ZDICT_trainFromBuffer_legacy(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: ZDICT_legacy_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZDICT_addEntropyTablesFromBuffer(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictContentSize: usize,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n    ) -> usize;\n}\n"
  },
  {
    "path": "zstd-safe/zstd-sys/src/bindings_zdict_std_experimental.rs",
    "content": "/*\nThis file is auto-generated from the public API of the zstd library.\nIt is released under the same BSD license.\n\nBSD License\n\nFor Zstandard software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n * Neither the name Facebook, nor Meta, nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n/* automatically generated by rust-bindgen 0.66.1 */\n\npub const ZDICT_DICTSIZE_MIN: u32 = 256;\npub const ZDICT_CONTENTSIZE_MIN: u32 = 128;\nextern \"C\" {\n    #[doc = \" ZDICT_trainFromBuffer():\\n  Train a dictionary from an array of samples.\\n  Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,\\n  f=20, and accel=1.\\n  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\\n  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\\n  The resulting dictionary will be saved into `dictBuffer`.\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n  Note:  Dictionary training will fail if there are not enough samples to construct a\\n         dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).\\n         If dictionary training fails, you should use zstd without a dictionary, as the dictionary\\n         would've been ineffective anyways. 
If you believe your samples would benefit from a dictionary\\n         please open an issue with details, and we can look into it.\\n  Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.\\n  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\\n        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\\n        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\\n        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\"]\n    pub fn ZDICT_trainFromBuffer(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZDICT_params_t {\n    #[doc = \"< optimize for a specific zstd compression level; 0 means default\"]\n    pub compressionLevel: ::core::ffi::c_int,\n    #[doc = \"< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug;\"]\n    pub notificationLevel: ::core::ffi::c_uint,\n    #[doc = \"< force dictID value; 0 means auto mode (32-bits random value)\\n   NOTE: The zstd format reserves some dictionary IDs for future use.\\n         You may use them in private settings, but be warned that they\\n         may be used by zstd in a public dictionary registry in the future.\\n         These dictionary IDs are:\\n           - low range  : <= 32767\\n           - high range : >= (2^31)\"]\n    pub dictID: ::core::ffi::c_uint,\n}\nextern \"C\" {\n    #[doc = \" ZDICT_finalizeDictionary():\\n Given a custom content as a basis for dictionary, and a set of samples,\\n finalize dictionary by adding headers and statistics according to the zstd\\n dictionary format.\\n\\n Samples must be stored concatenated in a flat buffer `samplesBuffer`,\\n supplied with 
an array of sizes `samplesSizes`, providing the size of each\\n sample in order. The samples are used to construct the statistics, so they\\n should be representative of what you will compress with this dictionary.\\n\\n The compression level can be set in `parameters`. You should pass the\\n compression level you expect to use in production. The statistics for each\\n compression level differ, so tuning the dictionary for the compression level\\n can help quite a bit.\\n\\n You can set an explicit dictionary ID in `parameters`, or allow us to pick\\n a random dictionary ID for you, but we can't guarantee no collisions.\\n\\n The dstDictBuffer and the dictContent may overlap, and the content will be\\n appended to the end of the header. If the header + the content doesn't fit in\\n maxDictSize the beginning of the content is truncated to make room, since it\\n is presumed that the most profitable content is at the end of the dictionary,\\n since that is the cheapest to reference.\\n\\n `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN).\\n\\n @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`),\\n          or an error code, which can be tested by ZDICT_isError().\\n Note: ZDICT_finalizeDictionary() will push notifications into stderr if\\n       instructed to, using notificationLevel>0.\\n NOTE: This function currently may fail in several edge cases including:\\n         * Not enough samples\\n         * Samples are uncompressible\\n         * Samples are all exactly the same\"]\n    pub fn ZDICT_finalizeDictionary(\n        dstDictBuffer: *mut ::core::ffi::c_void,\n        maxDictSize: usize,\n        dictContent: *const ::core::ffi::c_void,\n        dictContentSize: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: ZDICT_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZDICT_getDictID(\n        dictBuffer: 
*const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZDICT_getDictHeaderSize(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZDICT_isError(errorCode: usize) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZDICT_getErrorName(errorCode: usize) -> *const ::core::ffi::c_char;\n}\n#[doc = \" ZDICT_cover_params_t:\\n  k and d are the only required parameters.\\n  For others, value 0 means default.\"]\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZDICT_cover_params_t {\n    pub k: ::core::ffi::c_uint,\n    pub d: ::core::ffi::c_uint,\n    pub steps: ::core::ffi::c_uint,\n    pub nbThreads: ::core::ffi::c_uint,\n    pub splitPoint: f64,\n    pub shrinkDict: ::core::ffi::c_uint,\n    pub shrinkDictMaxRegression: ::core::ffi::c_uint,\n    pub zParams: ZDICT_params_t,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZDICT_fastCover_params_t {\n    pub k: ::core::ffi::c_uint,\n    pub d: ::core::ffi::c_uint,\n    pub f: ::core::ffi::c_uint,\n    pub steps: ::core::ffi::c_uint,\n    pub nbThreads: ::core::ffi::c_uint,\n    pub splitPoint: f64,\n    pub accel: ::core::ffi::c_uint,\n    pub shrinkDict: ::core::ffi::c_uint,\n    pub shrinkDictMaxRegression: ::core::ffi::c_uint,\n    pub zParams: ZDICT_params_t,\n}\nextern \"C\" {\n    #[doc = \" ZDICT_trainFromBuffer_cover():\\n  Train a dictionary from an array of samples using the COVER algorithm.\\n  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\\n  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\\n  The resulting dictionary will be saved into `dictBuffer`.\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n          See ZDICT_trainFromBuffer() for details on failure modes.\\n  Note: 
ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.\\n  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\\n        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\\n        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\\n        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\"]\n    pub fn ZDICT_trainFromBuffer_cover(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: ZDICT_cover_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZDICT_optimizeTrainFromBuffer_cover():\\n The same requirements as above hold for all the parameters except `parameters`.\\n This function tries many parameter combinations and picks the best parameters.\\n `*parameters` is filled with the best parameters found,\\n dictionary constructed with those parameters is stored in `dictBuffer`.\\n\\n All of the parameters d, k, steps are optional.\\n If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.\\n if steps is zero it defaults to its default value.\\n If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].\\n\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n          On success `*parameters` contains the parameters selected.\\n          See ZDICT_trainFromBuffer() for details on failure modes.\\n Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread.\"]\n    pub fn 
ZDICT_optimizeTrainFromBuffer_cover(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: *mut ZDICT_cover_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZDICT_trainFromBuffer_fastCover():\\n  Train a dictionary from an array of samples using a modified version of COVER algorithm.\\n  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\\n  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\\n  d and k are required.\\n  All other parameters are optional, will use default values if not provided\\n  The resulting dictionary will be saved into `dictBuffer`.\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n          See ZDICT_trainFromBuffer() for details on failure modes.\\n  Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.\\n  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\\n        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\\n        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\\n        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\"]\n    pub fn ZDICT_trainFromBuffer_fastCover(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: ZDICT_fastCover_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZDICT_optimizeTrainFromBuffer_fastCover():\\n The same requirements as above hold for all the parameters except `parameters`.\\n 
This function tries many parameter combinations (specifically, k and d combinations)\\n and picks the best parameters. `*parameters` is filled with the best parameters found,\\n dictionary constructed with those parameters is stored in `dictBuffer`.\\n All of the parameters d, k, steps, f, and accel are optional.\\n If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.\\n if steps is zero it defaults to its default value.\\n If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].\\n If f is zero, default value of 20 is used.\\n If accel is zero, default value of 1 is used.\\n\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n          On success `*parameters` contains the parameters selected.\\n          See ZDICT_trainFromBuffer() for details on failure modes.\\n Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.\"]\n    pub fn ZDICT_optimizeTrainFromBuffer_fastCover(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: *mut ZDICT_fastCover_params_t,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZDICT_legacy_params_t {\n    pub selectivityLevel: ::core::ffi::c_uint,\n    pub zParams: ZDICT_params_t,\n}\nextern \"C\" {\n    #[doc = \" ZDICT_trainFromBuffer_legacy():\\n  Train a dictionary from an array of samples.\\n  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\\n  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\\n  The resulting dictionary will be saved into `dictBuffer`.\\n `parameters` is optional and can be provided with values set to 0 
to mean \\\"default\\\".\\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\\n          or an error code, which can be tested with ZDICT_isError().\\n          See ZDICT_trainFromBuffer() for details on failure modes.\\n  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\\n        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\\n        In general, it's recommended to provide a few thousands samples, though this can vary a lot.\\n        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\\n  Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.\"]\n    pub fn ZDICT_trainFromBuffer_legacy(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n        parameters: ZDICT_legacy_params_t,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZDICT_addEntropyTablesFromBuffer(\n        dictBuffer: *mut ::core::ffi::c_void,\n        dictContentSize: usize,\n        dictBufferCapacity: usize,\n        samplesBuffer: *const ::core::ffi::c_void,\n        samplesSizes: *const usize,\n        nbSamples: ::core::ffi::c_uint,\n    ) -> usize;\n}\n"
  },
  {
    "path": "zstd-safe/zstd-sys/src/bindings_zstd.rs",
    "content": "/*\nThis file is auto-generated from the public API of the zstd library.\nIt is released under the same BSD license.\n\nBSD License\n\nFor Zstandard software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n * Neither the name Facebook, nor Meta, nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n/* automatically generated by rust-bindgen 0.71.1 */\n\npub const ZSTD_VERSION_MAJOR: u32 = 1;\npub const ZSTD_VERSION_MINOR: u32 = 5;\npub const ZSTD_VERSION_RELEASE: u32 = 7;\npub const ZSTD_VERSION_NUMBER: u32 = 10507;\npub const ZSTD_CLEVEL_DEFAULT: u32 = 3;\npub const ZSTD_MAGICNUMBER: u32 = 4247762216;\npub const ZSTD_MAGIC_DICTIONARY: u32 = 3962610743;\npub const ZSTD_MAGIC_SKIPPABLE_START: u32 = 407710288;\npub const ZSTD_MAGIC_SKIPPABLE_MASK: u32 = 4294967280;\npub const ZSTD_BLOCKSIZELOG_MAX: u32 = 17;\npub const ZSTD_BLOCKSIZE_MAX: u32 = 131072;\npub const ZSTD_CONTENTSIZE_UNKNOWN: i32 = -1;\npub const ZSTD_CONTENTSIZE_ERROR: i32 = -2;\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_ErrorCode {\n    ZSTD_error_no_error = 0,\n    ZSTD_error_GENERIC = 1,\n    ZSTD_error_prefix_unknown = 10,\n    ZSTD_error_version_unsupported = 12,\n    ZSTD_error_frameParameter_unsupported = 14,\n    ZSTD_error_frameParameter_windowTooLarge = 16,\n    ZSTD_error_corruption_detected = 20,\n    ZSTD_error_checksum_wrong = 22,\n    ZSTD_error_literals_headerWrong = 24,\n    ZSTD_error_dictionary_corrupted = 30,\n    ZSTD_error_dictionary_wrong = 32,\n    ZSTD_error_dictionaryCreation_failed = 34,\n    ZSTD_error_parameter_unsupported = 40,\n    ZSTD_error_parameter_combination_unsupported = 41,\n    ZSTD_error_parameter_outOfBound = 42,\n    ZSTD_error_tableLog_tooLarge = 44,\n    ZSTD_error_maxSymbolValue_tooLarge = 46,\n    
ZSTD_error_maxSymbolValue_tooSmall = 48,\n    ZSTD_error_cannotProduce_uncompressedBlock = 49,\n    ZSTD_error_stabilityCondition_notRespected = 50,\n    ZSTD_error_stage_wrong = 60,\n    ZSTD_error_init_missing = 62,\n    ZSTD_error_memory_allocation = 64,\n    ZSTD_error_workSpace_tooSmall = 66,\n    ZSTD_error_dstSize_tooSmall = 70,\n    ZSTD_error_srcSize_wrong = 72,\n    ZSTD_error_dstBuffer_null = 74,\n    ZSTD_error_noForwardProgress_destFull = 80,\n    ZSTD_error_noForwardProgress_inputEmpty = 82,\n    ZSTD_error_frameIndex_tooLarge = 100,\n    ZSTD_error_seekableIO = 102,\n    ZSTD_error_dstBuffer_wrong = 104,\n    ZSTD_error_srcBuffer_wrong = 105,\n    ZSTD_error_sequenceProducer_failed = 106,\n    ZSTD_error_externalSequences_invalid = 107,\n    ZSTD_error_maxCode = 120,\n}\nextern \"C\" {\n    pub fn ZSTD_getErrorString(\n        code: ZSTD_ErrorCode,\n    ) -> *const ::core::ffi::c_char;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_versionNumber() :\\n  Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE).\"]\n    pub fn ZSTD_versionNumber() -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_versionString() :\\n  Return runtime library version, like \\\"1.4.5\\\". Requires v1.3.0+.\"]\n    pub fn ZSTD_versionString() -> *const ::core::ffi::c_char;\n}\nextern \"C\" {\n    #[doc = \"  Simple Core API\\n/\\n/*! 
ZSTD_compress() :\\n  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.\\n  NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\\n        enough space to successfully compress the data.\\n  @return : compressed size written into `dst` (<= `dstCapacity),\\n            or an error code if it fails (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_compress(\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompress() :\\n `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.\\n  Multiple compressed frames can be decompressed at once with this method.\\n  The result will be the concatenation of all decompressed frames, back to back.\\n `dstCapacity` is an upper bound of originalSize to regenerate.\\n  First frame's decompressed size can be extracted using ZSTD_getFrameContentSize().\\n  If maximum upper bound isn't known, prefer using streaming mode to decompress data.\\n @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),\\n           or an errorCode if it fails (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_decompress(\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        compressedSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_getFrameContentSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDecompressedSize() (obsolete):\\n  This function is now obsolete, in favor of ZSTD_getFrameContentSize().\\n  Both functions work the same way, but ZSTD_getDecompressedSize() blends\\n  \\\"empty\\\", \\\"unknown\\\" and \\\"error\\\" 
results to the same return value (0),\\n  while ZSTD_getFrameContentSize() gives them separate return values.\\n @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise.\"]\n    pub fn ZSTD_getDecompressedSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_findFrameCompressedSize() : Requires v1.4.0+\\n `src` should point to the start of a ZSTD frame or skippable frame.\\n `srcSize` must be >= first frame size\\n @return : the compressed size of the first frame starting at `src`,\\n           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,\\n           or an error code if input is invalid\\n  Note 1: this method is called _find*() because it's not enough to read the header,\\n          it may have to scan through the frame's content, to reach its end.\\n  Note 2: this method also works with Skippable Frames. In which case,\\n          it returns the size of the complete skippable frame,\\n          which is always equal to its content size + 8 bytes for headers.\"]\n    pub fn ZSTD_findFrameCompressedSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBound(srcSize: usize) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_isError(result: usize) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZSTD_getErrorCode(functionResult: usize) -> ZSTD_ErrorCode;\n}\nextern \"C\" {\n    pub fn ZSTD_getErrorName(result: usize) -> *const ::core::ffi::c_char;\n}\nextern \"C\" {\n    pub fn ZSTD_minCLevel() -> ::core::ffi::c_int;\n}\nextern \"C\" {\n    pub fn ZSTD_maxCLevel() -> ::core::ffi::c_int;\n}\nextern \"C\" {\n    pub fn ZSTD_defaultCLevel() -> ::core::ffi::c_int;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_CCtx_s {\n    _unused: [u8; 0],\n}\n#[doc = \"  Explicit context\"]\npub type ZSTD_CCtx = ZSTD_CCtx_s;\nextern \"C\" {\n    
pub fn ZSTD_createCCtx() -> *mut ZSTD_CCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_freeCCtx(cctx: *mut ZSTD_CCtx) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressCCtx() :\\n  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\\n  Important : in order to mirror `ZSTD_compress()` behavior,\\n  this function compresses at the requested compression level,\\n  __ignoring any other advanced parameter__ .\\n  If any advanced parameter was set using the advanced API,\\n  they will all be reset. Only @compressionLevel remains.\"]\n    pub fn ZSTD_compressCCtx(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_DCtx_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_DCtx = ZSTD_DCtx_s;\nextern \"C\" {\n    pub fn ZSTD_createDCtx() -> *mut ZSTD_DCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_freeDCtx(dctx: *mut ZSTD_DCtx) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressDCtx() :\\n  Same as ZSTD_decompress(),\\n  requires an allocated ZSTD_DCtx.\\n  Compatible with sticky parameters (see below).\"]\n    pub fn ZSTD_decompressDCtx(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\n#[repr(u32)]\n#[doc = \"  Advanced compression API (Requires v1.4.0+)\"]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_strategy {\n    ZSTD_fast = 1,\n    ZSTD_dfast = 2,\n    ZSTD_greedy = 3,\n    ZSTD_lazy = 4,\n    ZSTD_lazy2 = 5,\n    ZSTD_btlazy2 = 6,\n    ZSTD_btopt = 7,\n    ZSTD_btultra = 8,\n    ZSTD_btultra2 = 9,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_cParameter {\n    ZSTD_c_compressionLevel = 100,\n    ZSTD_c_windowLog = 101,\n    
ZSTD_c_hashLog = 102,\n    ZSTD_c_chainLog = 103,\n    ZSTD_c_searchLog = 104,\n    ZSTD_c_minMatch = 105,\n    ZSTD_c_targetLength = 106,\n    ZSTD_c_strategy = 107,\n    ZSTD_c_targetCBlockSize = 130,\n    ZSTD_c_enableLongDistanceMatching = 160,\n    ZSTD_c_ldmHashLog = 161,\n    ZSTD_c_ldmMinMatch = 162,\n    ZSTD_c_ldmBucketSizeLog = 163,\n    ZSTD_c_ldmHashRateLog = 164,\n    ZSTD_c_contentSizeFlag = 200,\n    ZSTD_c_checksumFlag = 201,\n    ZSTD_c_dictIDFlag = 202,\n    ZSTD_c_nbWorkers = 400,\n    ZSTD_c_jobSize = 401,\n    ZSTD_c_overlapLog = 402,\n    ZSTD_c_experimentalParam1 = 500,\n    ZSTD_c_experimentalParam2 = 10,\n    ZSTD_c_experimentalParam3 = 1000,\n    ZSTD_c_experimentalParam4 = 1001,\n    ZSTD_c_experimentalParam5 = 1002,\n    ZSTD_c_experimentalParam7 = 1004,\n    ZSTD_c_experimentalParam8 = 1005,\n    ZSTD_c_experimentalParam9 = 1006,\n    ZSTD_c_experimentalParam10 = 1007,\n    ZSTD_c_experimentalParam11 = 1008,\n    ZSTD_c_experimentalParam12 = 1009,\n    ZSTD_c_experimentalParam13 = 1010,\n    ZSTD_c_experimentalParam14 = 1011,\n    ZSTD_c_experimentalParam15 = 1012,\n    ZSTD_c_experimentalParam16 = 1013,\n    ZSTD_c_experimentalParam17 = 1014,\n    ZSTD_c_experimentalParam18 = 1015,\n    ZSTD_c_experimentalParam19 = 1016,\n    ZSTD_c_experimentalParam20 = 1017,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_bounds {\n    pub error: usize,\n    pub lowerBound: ::core::ffi::c_int,\n    pub upperBound: ::core::ffi::c_int,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_cParam_getBounds() :\\n  All parameters must belong to an interval with lower and upper bounds,\\n  otherwise they will either trigger an error or be automatically clamped.\\n @return : a structure, ZSTD_bounds, which contains\\n         - an error status field, which must be tested using ZSTD_isError()\\n         - lower and upper bounds, both inclusive\"]\n    pub fn ZSTD_cParam_getBounds(cParam: ZSTD_cParameter) -> ZSTD_bounds;\n}\nextern \"C\" {\n    #[doc = \" 
ZSTD_CCtx_setParameter() :\\n  Set one compression parameter, selected by enum ZSTD_cParameter.\\n  All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().\\n  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\\n  Setting a parameter is generally only possible during frame initialization (before starting compression).\\n  Exception : when using multi-threading mode (nbWorkers >= 1),\\n              the following parameters can be updated _during_ compression (within same frame):\\n              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.\\n              new parameters will be active for next job only (after a flush()).\\n @return : an error code (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_CCtx_setParameter(\n        cctx: *mut ZSTD_CCtx,\n        param: ZSTD_cParameter,\n        value: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setPledgedSrcSize() :\\n  Total input data size to be compressed as a single frame.\\n  Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag.\\n  This value will also be controlled at end of frame, and trigger an error if not respected.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.\\n           In order to mean \\\"unknown content size\\\", pass constant ZSTD_CONTENTSIZE_UNKNOWN.\\n           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.\\n  Note 2 : pledgedSrcSize is only valid once, for the next frame.\\n           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.\\n  Note 3 : Whenever all input data is provided and consumed in a single round,\\n           for example with ZSTD_compress2(),\\n           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),\\n    
       this value is automatically overridden by srcSize instead.\"]\n    pub fn ZSTD_CCtx_setPledgedSrcSize(\n        cctx: *mut ZSTD_CCtx,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_ResetDirective {\n    ZSTD_reset_session_only = 1,\n    ZSTD_reset_parameters = 2,\n    ZSTD_reset_session_and_parameters = 3,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_reset() :\\n  There are 2 different things that can be reset, independently or jointly :\\n  - The session : will stop compressing current frame, and make CCtx ready to start a new one.\\n                  Useful after an error, or to interrupt any ongoing compression.\\n                  Any internal data not yet flushed is cancelled.\\n                  Compression parameters and dictionary remain unchanged.\\n                  They will be used to compress next frame.\\n                  Resetting session never fails.\\n  - The parameters : changes all parameters back to \\\"default\\\".\\n                  This also removes any reference to any dictionary or external sequence producer.\\n                  Parameters can only be changed between 2 sessions (i.e. 
no compression is currently ongoing)\\n                  otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())\\n  - Both : similar to resetting the session, followed by resetting parameters.\"]\n    pub fn ZSTD_CCtx_reset(\n        cctx: *mut ZSTD_CCtx,\n        reset: ZSTD_ResetDirective,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compress2() :\\n  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\\n  (note that this entry point doesn't even expose a compression level parameter).\\n  ZSTD_compress2() always starts a new frame.\\n  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\\n  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\\n  - The function is always blocking, returns when compression is completed.\\n  NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\\n        enough space to successfully compress the data, though it is possible it fails for other reasons.\\n @return : compressed size written into `dst` (<= `dstCapacity),\\n           or an error code if it fails (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_compress2(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\n#[repr(u32)]\n#[doc = \"  Advanced decompression API (Requires v1.4.0+)\"]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_dParameter {\n    ZSTD_d_windowLogMax = 100,\n    ZSTD_d_experimentalParam1 = 1000,\n    ZSTD_d_experimentalParam2 = 1001,\n    ZSTD_d_experimentalParam3 = 1002,\n    ZSTD_d_experimentalParam4 = 1003,\n    ZSTD_d_experimentalParam5 = 1004,\n    ZSTD_d_experimentalParam6 = 1005,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_dParam_getBounds() :\\n  All 
parameters must belong to an interval with lower and upper bounds,\\n  otherwise they will either trigger an error or be automatically clamped.\\n @return : a structure, ZSTD_bounds, which contains\\n         - an error status field, which must be tested using ZSTD_isError()\\n         - both lower and upper bounds, inclusive\"]\n    pub fn ZSTD_dParam_getBounds(dParam: ZSTD_dParameter) -> ZSTD_bounds;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_setParameter() :\\n  Set one compression parameter, selected by enum ZSTD_dParameter.\\n  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().\\n  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\\n  Setting a parameter is only possible during frame initialization (before starting decompression).\\n @return : 0, or an error code (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_DCtx_setParameter(\n        dctx: *mut ZSTD_DCtx,\n        param: ZSTD_dParameter,\n        value: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_reset() :\\n  Return a DCtx to clean state.\\n  Session and parameters can be reset jointly or separately.\\n  Parameters can only be reset when no active frame is being decompressed.\\n @return : 0, or an error code, which can be tested with ZSTD_isError()\"]\n    pub fn ZSTD_DCtx_reset(\n        dctx: *mut ZSTD_DCtx,\n        reset: ZSTD_ResetDirective,\n    ) -> usize;\n}\n#[doc = \"  Streaming\"]\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_inBuffer_s {\n    #[doc = \"< start of input buffer\"]\n    pub src: *const ::core::ffi::c_void,\n    #[doc = \"< size of input buffer\"]\n    pub size: usize,\n    #[doc = \"< position where reading stopped. Will be updated. 
Necessarily 0 <= pos <= size\"]\n    pub pos: usize,\n}\n#[doc = \"  Streaming\"]\npub type ZSTD_inBuffer = ZSTD_inBuffer_s;\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_outBuffer_s {\n    #[doc = \"< start of output buffer\"]\n    pub dst: *mut ::core::ffi::c_void,\n    #[doc = \"< size of output buffer\"]\n    pub size: usize,\n    #[doc = \"< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size\"]\n    pub pos: usize,\n}\npub type ZSTD_outBuffer = ZSTD_outBuffer_s;\npub type ZSTD_CStream = ZSTD_CCtx;\nextern \"C\" {\n    pub fn ZSTD_createCStream() -> *mut ZSTD_CStream;\n}\nextern \"C\" {\n    pub fn ZSTD_freeCStream(zcs: *mut ZSTD_CStream) -> usize;\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_EndDirective {\n    ZSTD_e_continue = 0,\n    ZSTD_e_flush = 1,\n    ZSTD_e_end = 2,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressStream2() : Requires v1.4.0+\\n  Behaves about the same as ZSTD_compressStream, with additional control on end directive.\\n  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\\n  - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\\n  - output->pos must be <= dstCapacity, input->pos must be <= srcSize\\n  - output->pos and input->pos will be updated. 
They are guaranteed to remain below their respective limit.\\n  - endOp must be a valid directive\\n  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\\n  - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,\\n                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.\\n                                                  The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\\n  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\\n  - @return provides a minimum amount of data remaining to be flushed from internal buffers\\n            or an error code, which can be tested using ZSTD_isError().\\n            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\\n            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.\\n            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\\n  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\\n            only ZSTD_e_end or ZSTD_e_flush operations are allowed.\\n            Before starting a new compression job, or changing compression parameters,\\n            it is required to fully flush internal buffers.\\n  - note: if an operation ends with an error, it may leave @cctx in an undefined state.\\n          Therefore, it's UB to invoke ZSTD_compressStream2() of ZSTD_compressStream() on such a state.\\n          In order to be re-employed after an error, a state must be reset,\\n          which can be done explicitly 
(ZSTD_CCtx_reset()),\\n          or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())\"]\n    pub fn ZSTD_compressStream2(\n        cctx: *mut ZSTD_CCtx,\n        output: *mut ZSTD_outBuffer,\n        input: *mut ZSTD_inBuffer,\n        endOp: ZSTD_EndDirective,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_CStreamInSize() -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_CStreamOutSize() -> usize;\n}\nextern \"C\" {\n    #[doc = \" Equivalent to:\\n\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\\n     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\\n\\n Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API\\n to compress with a dictionary.\"]\n    pub fn ZSTD_initCStream(\n        zcs: *mut ZSTD_CStream,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).\\n NOTE: The return value is different. ZSTD_compressStream() returns a hint for\\n the next read size (if non-zero and not an error). 
ZSTD_compressStream2()\\n returns the minimum nb of bytes left to flush (if non-zero and not an error).\"]\n    pub fn ZSTD_compressStream(\n        zcs: *mut ZSTD_CStream,\n        output: *mut ZSTD_outBuffer,\n        input: *mut ZSTD_inBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush).\"]\n    pub fn ZSTD_flushStream(\n        zcs: *mut ZSTD_CStream,\n        output: *mut ZSTD_outBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end).\"]\n    pub fn ZSTD_endStream(\n        zcs: *mut ZSTD_CStream,\n        output: *mut ZSTD_outBuffer,\n    ) -> usize;\n}\npub type ZSTD_DStream = ZSTD_DCtx;\nextern \"C\" {\n    pub fn ZSTD_createDStream() -> *mut ZSTD_DStream;\n}\nextern \"C\" {\n    pub fn ZSTD_freeDStream(zds: *mut ZSTD_DStream) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initDStream() :\\n Initialize/reset DStream state for new decompression operation.\\n Call before new decompression operation using same DStream.\\n\\n Note : This function is redundant with the advanced API and equivalent to:\\n     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\\n     ZSTD_DCtx_refDDict(zds, NULL);\"]\n    pub fn ZSTD_initDStream(zds: *mut ZSTD_DStream) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressStream() :\\n Streaming decompression function.\\n Call repetitively to consume full input updating it as necessary.\\n Function will update both input and output `pos` fields exposing current state via these fields:\\n - `input.pos < input.size`, some input remaining and caller should provide remaining input\\n   on the next call.\\n - `output.pos < output.size`, decoder flushed internal output buffer.\\n - `output.pos == output.size`, unflushed data potentially present in the internal buffers,\\n   check ZSTD_decompressStream() @return value,\\n   if > 0, invoke it again to flush remaining data to 
output.\\n Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.\\n\\n @return : 0 when a frame is completely decoded and fully flushed,\\n           or an error code, which can be tested using ZSTD_isError(),\\n           or any other value > 0, which means there is some decoding or flushing to do to complete current frame.\\n\\n Note: when an operation returns with an error code, the @zds state may be left in undefined state.\\n       It's UB to invoke `ZSTD_decompressStream()` on such a state.\\n       In order to re-use such a state, it must be first reset,\\n       which can be done explicitly (`ZSTD_DCtx_reset()`),\\n       or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)\"]\n    pub fn ZSTD_decompressStream(\n        zds: *mut ZSTD_DStream,\n        output: *mut ZSTD_outBuffer,\n        input: *mut ZSTD_inBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_DStreamInSize() -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_DStreamOutSize() -> usize;\n}\nextern \"C\" {\n    #[doc = \"  Simple dictionary API\\n/\\n/*! 
ZSTD_compress_usingDict() :\\n  Compression at an explicit compression level using a Dictionary.\\n  A dictionary can be any arbitrary data segment (also called a prefix),\\n  or a buffer with specified information (see zdict.h).\\n  Note : This function loads the dictionary, resulting in significant startup delay.\\n         It's intended for a dictionary used only once.\\n  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used.\"]\n    pub fn ZSTD_compress_usingDict(\n        ctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompress_usingDict() :\\n  Decompression using a known Dictionary.\\n  Dictionary must be identical to the one used during compression.\\n  Note : This function loads the dictionary, resulting in significant startup delay.\\n         It's intended for a dictionary used only once.\\n  Note : When `dict == NULL || dictSize < 8` no dictionary is used.\"]\n    pub fn ZSTD_decompress_usingDict(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_CDict_s {\n    _unused: [u8; 0],\n}\n#[doc = \"  Bulk processing dictionary API\"]\npub type ZSTD_CDict = ZSTD_CDict_s;\nextern \"C\" {\n    #[doc = \" ZSTD_createCDict() :\\n  When compressing multiple messages or blocks using the same dictionary,\\n  it's recommended to digest the dictionary only once, since it's a costly operation.\\n  ZSTD_createCDict() will create a state from digesting a dictionary.\\n  The resulting state can be used for future 
compression operations with very limited startup cost.\\n  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.\\n @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.\\n  Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.\\n  Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,\\n      in which case the only thing that it transports is the @compressionLevel.\\n      This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,\\n      expecting a ZSTD_CDict parameter with any data, including those without a known dictionary.\"]\n    pub fn ZSTD_createCDict(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> *mut ZSTD_CDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_freeCDict() :\\n  Function frees memory allocated by ZSTD_createCDict().\\n  If a NULL pointer is passed, no operation is performed.\"]\n    pub fn ZSTD_freeCDict(CDict: *mut ZSTD_CDict) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compress_usingCDict() :\\n  Compression using a digested Dictionary.\\n  Recommended when same dictionary is used multiple times.\\n  Note : compression level is _decided at dictionary creation time_,\\n     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)\"]\n    pub fn ZSTD_compress_usingCDict(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        cdict: *const ZSTD_CDict,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_DDict_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_DDict = ZSTD_DDict_s;\nextern \"C\" {\n    #[doc = \" ZSTD_createDDict() :\\n  Create a digested dictionary, ready to start decompression 
operation without startup delay.\\n  dictBuffer can be released after DDict creation, as its content is copied inside DDict.\"]\n    pub fn ZSTD_createDDict(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> *mut ZSTD_DDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_freeDDict() :\\n  Function frees memory allocated with ZSTD_createDDict()\\n  If a NULL pointer is passed, no operation is performed.\"]\n    pub fn ZSTD_freeDDict(ddict: *mut ZSTD_DDict) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompress_usingDDict() :\\n  Decompression using a digested Dictionary.\\n  Recommended when same dictionary is used multiple times.\"]\n    pub fn ZSTD_decompress_usingDDict(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        ddict: *const ZSTD_DDict,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromDict() : Requires v1.4.0+\\n  Provides the dictID stored within dictionary.\\n  if @return == 0, the dictionary is not conformant with Zstandard specification.\\n  It can still be loaded, but as a content-only dictionary.\"]\n    pub fn ZSTD_getDictID_fromDict(\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromCDict() : Requires v1.5.0+\\n  Provides the dictID of the dictionary loaded into `cdict`.\\n  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\\n  Non-conformant dictionaries can still be loaded, but as content-only dictionaries.\"]\n    pub fn ZSTD_getDictID_fromCDict(\n        cdict: *const ZSTD_CDict,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromDDict() : Requires v1.4.0+\\n  Provides the dictID of the dictionary loaded into `ddict`.\\n  If @return == 0, the dictionary is not conformant to Zstandard 
specification, or empty.\\n  Non-conformant dictionaries can still be loaded, but as content-only dictionaries.\"]\n    pub fn ZSTD_getDictID_fromDDict(\n        ddict: *const ZSTD_DDict,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromFrame() : Requires v1.4.0+\\n  Provides the dictID required to decompressed the frame stored within `src`.\\n  If @return == 0, the dictID could not be decoded.\\n  This could for one of the following reasons :\\n  - The frame does not require a dictionary to be decoded (most common case).\\n  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information.\\n    Note : this use case also happens when using a non-conformant dictionary.\\n  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).\\n  - This is not a Zstandard frame.\\n  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code.\"]\n    pub fn ZSTD_getDictID_fromFrame(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_loadDictionary() : Requires v1.4.0+\\n  Create an internal CDict from `dict` buffer.\\n  Decompression will have to use same dictionary.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,\\n           meaning \\\"return to no-dictionary mode\\\".\\n  Note 1 : Dictionary is sticky, it will be used for all future compressed frames,\\n           until parameters are reset, a new dictionary is loaded, or the dictionary\\n           is explicitly invalidated by loading a NULL dictionary.\\n  Note 2 : Loading a dictionary involves building tables.\\n           It's also a CPU consuming operation, with non-negligible 
impact on latency.\\n           Tables are dependent on compression parameters, and for this reason,\\n           compression parameters can no longer be changed after loading a dictionary.\\n  Note 3 :`dict` content will be copied internally.\\n           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.\\n           In such a case, dictionary buffer must outlive its users.\\n  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()\\n           to precisely select how dictionary content must be interpreted.\\n  Note 5 : This method does not benefit from LDM (long distance mode).\\n           If you want to employ LDM on some large dictionary content,\\n           prefer employing ZSTD_CCtx_refPrefix() described below.\"]\n    pub fn ZSTD_CCtx_loadDictionary(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_refCDict() : Requires v1.4.0+\\n  Reference a prepared dictionary, to be used for all future compressed frames.\\n  Note that compression parameters are enforced from within CDict,\\n  and supersede any compression parameter previously set within CCtx.\\n  The parameters ignored are labelled as \\\"superseded-by-cdict\\\" in the ZSTD_cParameter enum docs.\\n  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.\\n  The dictionary will remain valid for future compressed frames using same CCtx.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special : Referencing a NULL CDict means \\\"return to no-dictionary mode\\\".\\n  Note 1 : Currently, only one dictionary can be managed.\\n           Referencing a new dictionary effectively \\\"discards\\\" any previous one.\\n  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx.\"]\n    pub fn ZSTD_CCtx_refCDict(\n        cctx: *mut ZSTD_CCtx,\n        cdict: *const ZSTD_CDict,\n    ) 
-> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_refPrefix() : Requires v1.4.0+\\n  Reference a prefix (single-usage dictionary) for next compressed frame.\\n  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).\\n  Decompression will need same prefix to properly regenerate data.\\n  Compressing with a prefix is similar in outcome as performing a diff and compressing it,\\n  but performs much faster, especially during decompression (compression speed is tunable with compression level).\\n  This method is compatible with LDM (long distance mode).\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary\\n  Note 1 : Prefix buffer is referenced. It **must** outlive compression.\\n           Its content must remain unmodified during compression.\\n  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,\\n           ensure that the window size is large enough to contain the entire source.\\n           See ZSTD_c_windowLog.\\n  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.\\n           It's a CPU consuming operation, with non-negligible impact on latency.\\n           If there is a need to use the same prefix multiple times, consider loadDictionary instead.\\n  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).\\n           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation.\"]\n    pub fn ZSTD_CCtx_refPrefix(\n        cctx: *mut ZSTD_CCtx,\n        prefix: *const ::core::ffi::c_void,\n        prefixSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_loadDictionary() : Requires v1.4.0+\\n  Create an internal DDict from dict buffer, to be used to decompress all future frames.\\n  The dictionary remains valid for all future frames, until 
explicitly invalidated, or\\n  a new dictionary is loaded.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,\\n            meaning \\\"return to no-dictionary mode\\\".\\n  Note 1 : Loading a dictionary involves building tables,\\n           which has a non-negligible impact on CPU usage and latency.\\n           It's recommended to \\\"load once, use many times\\\", to amortize the cost\\n  Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.\\n           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.\\n  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of\\n           how dictionary content is loaded and interpreted.\"]\n    pub fn ZSTD_DCtx_loadDictionary(\n        dctx: *mut ZSTD_DCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_refDDict() : Requires v1.4.0+\\n  Reference a prepared dictionary, to be used to decompress next frames.\\n  The dictionary remains active for decompression of future frames using same DCtx.\\n\\n  If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function\\n  will store the DDict references in a table, and the DDict used for decompression\\n  will be determined at decompression time, as per the dict ID in the frame.\\n  The memory for the table is allocated on the first call to refDDict, and can be\\n  freed with ZSTD_freeDCtx().\\n\\n  If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary\\n  will be managed, and referencing a dictionary effectively \\\"discards\\\" any previous one.\\n\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special: referencing a NULL DDict means \\\"return to no-dictionary mode\\\".\\n  Note 2 : DDict is just referenced, its lifetime must 
outlive its usage from DCtx.\"]\n    pub fn ZSTD_DCtx_refDDict(\n        dctx: *mut ZSTD_DCtx,\n        ddict: *const ZSTD_DDict,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_refPrefix() : Requires v1.4.0+\\n  Reference a prefix (single-usage dictionary) to decompress next frame.\\n  This is the reverse operation of ZSTD_CCtx_refPrefix(),\\n  and must use the same prefix as the one used during compression.\\n  Prefix is **only used once**. Reference is discarded at end of frame.\\n  End of frame is reached when ZSTD_decompressStream() returns 0.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary\\n  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.\\n           Prefix buffer must remain unmodified up to the end of frame,\\n           reached when ZSTD_decompressStream() returns 0.\\n  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).\\n           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)\\n  Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.\\n           A full dictionary is more costly, as it requires building tables.\"]\n    pub fn ZSTD_DCtx_refPrefix(\n        dctx: *mut ZSTD_DCtx,\n        prefix: *const ::core::ffi::c_void,\n        prefixSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_sizeof_*() : Requires v1.4.0+\\n  These functions give the _current_ memory usage of selected object.\\n  Note that object memory usage can evolve (increase or decrease) over time.\"]\n    pub fn ZSTD_sizeof_CCtx(cctx: *const ZSTD_CCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_DCtx(dctx: *const ZSTD_DCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_CStream(zcs: *const ZSTD_CStream) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_DStream(zds: *const ZSTD_DStream) -> 
usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_CDict(cdict: *const ZSTD_CDict) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_DDict(ddict: *const ZSTD_DDict) -> usize;\n}\n"
  },
  {
    "path": "zstd-safe/zstd-sys/src/bindings_zstd_experimental.rs",
    "content": "/*\nThis file is auto-generated from the public API of the zstd library.\nIt is released under the same BSD license.\n\nBSD License\n\nFor Zstandard software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n * Neither the name Facebook, nor Meta, nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n/* automatically generated by rust-bindgen 0.71.1 */\n\npub const ZSTD_VERSION_MAJOR: u32 = 1;\npub const ZSTD_VERSION_MINOR: u32 = 5;\npub const ZSTD_VERSION_RELEASE: u32 = 7;\npub const ZSTD_VERSION_NUMBER: u32 = 10507;\npub const ZSTD_CLEVEL_DEFAULT: u32 = 3;\npub const ZSTD_MAGICNUMBER: u32 = 4247762216;\npub const ZSTD_MAGIC_DICTIONARY: u32 = 3962610743;\npub const ZSTD_MAGIC_SKIPPABLE_START: u32 = 407710288;\npub const ZSTD_MAGIC_SKIPPABLE_MASK: u32 = 4294967280;\npub const ZSTD_BLOCKSIZELOG_MAX: u32 = 17;\npub const ZSTD_BLOCKSIZE_MAX: u32 = 131072;\npub const ZSTD_CONTENTSIZE_UNKNOWN: i32 = -1;\npub const ZSTD_CONTENTSIZE_ERROR: i32 = -2;\npub const ZSTD_FRAMEHEADERSIZE_MAX: u32 = 18;\npub const ZSTD_SKIPPABLEHEADERSIZE: u32 = 8;\npub const ZSTD_WINDOWLOG_MAX_32: u32 = 30;\npub const ZSTD_WINDOWLOG_MAX_64: u32 = 31;\npub const ZSTD_WINDOWLOG_MIN: u32 = 10;\npub const ZSTD_HASHLOG_MIN: u32 = 6;\npub const ZSTD_CHAINLOG_MAX_32: u32 = 29;\npub const ZSTD_CHAINLOG_MAX_64: u32 = 30;\npub const ZSTD_CHAINLOG_MIN: u32 = 6;\npub const ZSTD_SEARCHLOG_MIN: u32 = 1;\npub const ZSTD_MINMATCH_MAX: u32 = 7;\npub const ZSTD_MINMATCH_MIN: u32 = 3;\npub const ZSTD_TARGETLENGTH_MAX: u32 = 131072;\npub const ZSTD_TARGETLENGTH_MIN: u32 = 0;\npub const ZSTD_BLOCKSIZE_MAX_MIN: u32 = 1024;\npub const ZSTD_OVERLAPLOG_MIN: u32 = 0;\npub const ZSTD_OVERLAPLOG_MAX: u32 = 9;\npub const ZSTD_WINDOWLOG_LIMIT_DEFAULT: u32 = 27;\npub const ZSTD_LDM_HASHLOG_MIN: u32 = 
6;\npub const ZSTD_LDM_MINMATCH_MIN: u32 = 4;\npub const ZSTD_LDM_MINMATCH_MAX: u32 = 4096;\npub const ZSTD_LDM_BUCKETSIZELOG_MIN: u32 = 1;\npub const ZSTD_LDM_BUCKETSIZELOG_MAX: u32 = 8;\npub const ZSTD_LDM_HASHRATELOG_MIN: u32 = 0;\npub const ZSTD_TARGETCBLOCKSIZE_MIN: u32 = 1340;\npub const ZSTD_TARGETCBLOCKSIZE_MAX: u32 = 131072;\npub const ZSTD_SRCSIZEHINT_MIN: u32 = 0;\npub const ZSTD_BLOCKSPLITTER_LEVEL_MAX: u32 = 6;\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_ErrorCode {\n    ZSTD_error_no_error = 0,\n    ZSTD_error_GENERIC = 1,\n    ZSTD_error_prefix_unknown = 10,\n    ZSTD_error_version_unsupported = 12,\n    ZSTD_error_frameParameter_unsupported = 14,\n    ZSTD_error_frameParameter_windowTooLarge = 16,\n    ZSTD_error_corruption_detected = 20,\n    ZSTD_error_checksum_wrong = 22,\n    ZSTD_error_literals_headerWrong = 24,\n    ZSTD_error_dictionary_corrupted = 30,\n    ZSTD_error_dictionary_wrong = 32,\n    ZSTD_error_dictionaryCreation_failed = 34,\n    ZSTD_error_parameter_unsupported = 40,\n    ZSTD_error_parameter_combination_unsupported = 41,\n    ZSTD_error_parameter_outOfBound = 42,\n    ZSTD_error_tableLog_tooLarge = 44,\n    ZSTD_error_maxSymbolValue_tooLarge = 46,\n    ZSTD_error_maxSymbolValue_tooSmall = 48,\n    ZSTD_error_cannotProduce_uncompressedBlock = 49,\n    ZSTD_error_stabilityCondition_notRespected = 50,\n    ZSTD_error_stage_wrong = 60,\n    ZSTD_error_init_missing = 62,\n    ZSTD_error_memory_allocation = 64,\n    ZSTD_error_workSpace_tooSmall = 66,\n    ZSTD_error_dstSize_tooSmall = 70,\n    ZSTD_error_srcSize_wrong = 72,\n    ZSTD_error_dstBuffer_null = 74,\n    ZSTD_error_noForwardProgress_destFull = 80,\n    ZSTD_error_noForwardProgress_inputEmpty = 82,\n    ZSTD_error_frameIndex_tooLarge = 100,\n    ZSTD_error_seekableIO = 102,\n    ZSTD_error_dstBuffer_wrong = 104,\n    ZSTD_error_srcBuffer_wrong = 105,\n    ZSTD_error_sequenceProducer_failed = 106,\n    
ZSTD_error_externalSequences_invalid = 107,\n    ZSTD_error_maxCode = 120,\n}\nextern \"C\" {\n    pub fn ZSTD_getErrorString(\n        code: ZSTD_ErrorCode,\n    ) -> *const ::core::ffi::c_char;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_versionNumber() :\\n  Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE).\"]\n    pub fn ZSTD_versionNumber() -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_versionString() :\\n  Return runtime library version, like \\\"1.4.5\\\". Requires v1.3.0+.\"]\n    pub fn ZSTD_versionString() -> *const ::core::ffi::c_char;\n}\nextern \"C\" {\n    #[doc = \"  Simple Core API\\n/\\n/*! ZSTD_compress() :\\n  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.\\n  NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\\n        enough space to successfully compress the data.\\n  @return : compressed size written into `dst` (<= `dstCapacity),\\n            or an error code if it fails (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_compress(\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompress() :\\n `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.\\n  Multiple compressed frames can be decompressed at once with this method.\\n  The result will be the concatenation of all decompressed frames, back to back.\\n `dstCapacity` is an upper bound of originalSize to regenerate.\\n  First frame's decompressed size can be extracted using ZSTD_getFrameContentSize().\\n  If maximum upper bound isn't known, prefer using streaming mode to decompress data.\\n @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),\\n           or an errorCode if it fails (which can 
be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_decompress(\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        compressedSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_getFrameContentSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDecompressedSize() (obsolete):\\n  This function is now obsolete, in favor of ZSTD_getFrameContentSize().\\n  Both functions work the same way, but ZSTD_getDecompressedSize() blends\\n  \\\"empty\\\", \\\"unknown\\\" and \\\"error\\\" results to the same return value (0),\\n  while ZSTD_getFrameContentSize() gives them separate return values.\\n @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise.\"]\n    pub fn ZSTD_getDecompressedSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_findFrameCompressedSize() : Requires v1.4.0+\\n `src` should point to the start of a ZSTD frame or skippable frame.\\n `srcSize` must be >= first frame size\\n @return : the compressed size of the first frame starting at `src`,\\n           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,\\n           or an error code if input is invalid\\n  Note 1: this method is called _find*() because it's not enough to read the header,\\n          it may have to scan through the frame's content, to reach its end.\\n  Note 2: this method also works with Skippable Frames. 
In which case,\\n          it returns the size of the complete skippable frame,\\n          which is always equal to its content size + 8 bytes for headers.\"]\n    pub fn ZSTD_findFrameCompressedSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBound(srcSize: usize) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_isError(result: usize) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZSTD_getErrorCode(functionResult: usize) -> ZSTD_ErrorCode;\n}\nextern \"C\" {\n    pub fn ZSTD_getErrorName(result: usize) -> *const ::core::ffi::c_char;\n}\nextern \"C\" {\n    pub fn ZSTD_minCLevel() -> ::core::ffi::c_int;\n}\nextern \"C\" {\n    pub fn ZSTD_maxCLevel() -> ::core::ffi::c_int;\n}\nextern \"C\" {\n    pub fn ZSTD_defaultCLevel() -> ::core::ffi::c_int;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_CCtx_s {\n    _unused: [u8; 0],\n}\n#[doc = \"  Explicit context\"]\npub type ZSTD_CCtx = ZSTD_CCtx_s;\nextern \"C\" {\n    pub fn ZSTD_createCCtx() -> *mut ZSTD_CCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_freeCCtx(cctx: *mut ZSTD_CCtx) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressCCtx() :\\n  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\\n  Important : in order to mirror `ZSTD_compress()` behavior,\\n  this function compresses at the requested compression level,\\n  __ignoring any other advanced parameter__ .\\n  If any advanced parameter was set using the advanced API,\\n  they will all be reset. 
Only @compressionLevel remains.\"]\n    pub fn ZSTD_compressCCtx(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_DCtx_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_DCtx = ZSTD_DCtx_s;\nextern \"C\" {\n    pub fn ZSTD_createDCtx() -> *mut ZSTD_DCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_freeDCtx(dctx: *mut ZSTD_DCtx) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressDCtx() :\\n  Same as ZSTD_decompress(),\\n  requires an allocated ZSTD_DCtx.\\n  Compatible with sticky parameters (see below).\"]\n    pub fn ZSTD_decompressDCtx(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\n#[repr(u32)]\n#[doc = \"  Advanced compression API (Requires v1.4.0+)\"]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_strategy {\n    ZSTD_fast = 1,\n    ZSTD_dfast = 2,\n    ZSTD_greedy = 3,\n    ZSTD_lazy = 4,\n    ZSTD_lazy2 = 5,\n    ZSTD_btlazy2 = 6,\n    ZSTD_btopt = 7,\n    ZSTD_btultra = 8,\n    ZSTD_btultra2 = 9,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_cParameter {\n    ZSTD_c_compressionLevel = 100,\n    ZSTD_c_windowLog = 101,\n    ZSTD_c_hashLog = 102,\n    ZSTD_c_chainLog = 103,\n    ZSTD_c_searchLog = 104,\n    ZSTD_c_minMatch = 105,\n    ZSTD_c_targetLength = 106,\n    ZSTD_c_strategy = 107,\n    ZSTD_c_targetCBlockSize = 130,\n    ZSTD_c_enableLongDistanceMatching = 160,\n    ZSTD_c_ldmHashLog = 161,\n    ZSTD_c_ldmMinMatch = 162,\n    ZSTD_c_ldmBucketSizeLog = 163,\n    ZSTD_c_ldmHashRateLog = 164,\n    ZSTD_c_contentSizeFlag = 200,\n    ZSTD_c_checksumFlag = 201,\n    ZSTD_c_dictIDFlag = 202,\n    ZSTD_c_nbWorkers = 400,\n    
ZSTD_c_jobSize = 401,\n    ZSTD_c_overlapLog = 402,\n    ZSTD_c_experimentalParam1 = 500,\n    ZSTD_c_experimentalParam2 = 10,\n    ZSTD_c_experimentalParam3 = 1000,\n    ZSTD_c_experimentalParam4 = 1001,\n    ZSTD_c_experimentalParam5 = 1002,\n    ZSTD_c_experimentalParam7 = 1004,\n    ZSTD_c_experimentalParam8 = 1005,\n    ZSTD_c_experimentalParam9 = 1006,\n    ZSTD_c_experimentalParam10 = 1007,\n    ZSTD_c_experimentalParam11 = 1008,\n    ZSTD_c_experimentalParam12 = 1009,\n    ZSTD_c_experimentalParam13 = 1010,\n    ZSTD_c_experimentalParam14 = 1011,\n    ZSTD_c_experimentalParam15 = 1012,\n    ZSTD_c_experimentalParam16 = 1013,\n    ZSTD_c_experimentalParam17 = 1014,\n    ZSTD_c_experimentalParam18 = 1015,\n    ZSTD_c_experimentalParam19 = 1016,\n    ZSTD_c_experimentalParam20 = 1017,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_bounds {\n    pub error: usize,\n    pub lowerBound: ::core::ffi::c_int,\n    pub upperBound: ::core::ffi::c_int,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_cParam_getBounds() :\\n  All parameters must belong to an interval with lower and upper bounds,\\n  otherwise they will either trigger an error or be automatically clamped.\\n @return : a structure, ZSTD_bounds, which contains\\n         - an error status field, which must be tested using ZSTD_isError()\\n         - lower and upper bounds, both inclusive\"]\n    pub fn ZSTD_cParam_getBounds(cParam: ZSTD_cParameter) -> ZSTD_bounds;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setParameter() :\\n  Set one compression parameter, selected by enum ZSTD_cParameter.\\n  All parameters have valid bounds. 
Bounds can be queried using ZSTD_cParam_getBounds().\\n  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\\n  Setting a parameter is generally only possible during frame initialization (before starting compression).\\n  Exception : when using multi-threading mode (nbWorkers >= 1),\\n              the following parameters can be updated _during_ compression (within same frame):\\n              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.\\n              new parameters will be active for next job only (after a flush()).\\n @return : an error code (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_CCtx_setParameter(\n        cctx: *mut ZSTD_CCtx,\n        param: ZSTD_cParameter,\n        value: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setPledgedSrcSize() :\\n  Total input data size to be compressed as a single frame.\\n  Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag.\\n  This value will also be controlled at end of frame, and trigger an error if not respected.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.\\n           In order to mean \\\"unknown content size\\\", pass constant ZSTD_CONTENTSIZE_UNKNOWN.\\n           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.\\n  Note 2 : pledgedSrcSize is only valid once, for the next frame.\\n           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.\\n  Note 3 : Whenever all input data is provided and consumed in a single round,\\n           for example with ZSTD_compress2(),\\n           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),\\n           this value is automatically overridden by srcSize instead.\"]\n    pub fn ZSTD_CCtx_setPledgedSrcSize(\n        cctx: *mut 
ZSTD_CCtx,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_ResetDirective {\n    ZSTD_reset_session_only = 1,\n    ZSTD_reset_parameters = 2,\n    ZSTD_reset_session_and_parameters = 3,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_reset() :\\n  There are 2 different things that can be reset, independently or jointly :\\n  - The session : will stop compressing current frame, and make CCtx ready to start a new one.\\n                  Useful after an error, or to interrupt any ongoing compression.\\n                  Any internal data not yet flushed is cancelled.\\n                  Compression parameters and dictionary remain unchanged.\\n                  They will be used to compress next frame.\\n                  Resetting session never fails.\\n  - The parameters : changes all parameters back to \\\"default\\\".\\n                  This also removes any reference to any dictionary or external sequence producer.\\n                  Parameters can only be changed between 2 sessions (i.e. 
no compression is currently ongoing)\\n                  otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())\\n  - Both : similar to resetting the session, followed by resetting parameters.\"]\n    pub fn ZSTD_CCtx_reset(\n        cctx: *mut ZSTD_CCtx,\n        reset: ZSTD_ResetDirective,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compress2() :\\n  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\\n  (note that this entry point doesn't even expose a compression level parameter).\\n  ZSTD_compress2() always starts a new frame.\\n  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\\n  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\\n  - The function is always blocking, returns when compression is completed.\\n  NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\\n        enough space to successfully compress the data, though it is possible it fails for other reasons.\\n @return : compressed size written into `dst` (<= `dstCapacity),\\n           or an error code if it fails (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_compress2(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\n#[repr(u32)]\n#[doc = \"  Advanced decompression API (Requires v1.4.0+)\"]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_dParameter {\n    ZSTD_d_windowLogMax = 100,\n    ZSTD_d_experimentalParam1 = 1000,\n    ZSTD_d_experimentalParam2 = 1001,\n    ZSTD_d_experimentalParam3 = 1002,\n    ZSTD_d_experimentalParam4 = 1003,\n    ZSTD_d_experimentalParam5 = 1004,\n    ZSTD_d_experimentalParam6 = 1005,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_dParam_getBounds() :\\n  All 
parameters must belong to an interval with lower and upper bounds,\\n  otherwise they will either trigger an error or be automatically clamped.\\n @return : a structure, ZSTD_bounds, which contains\\n         - an error status field, which must be tested using ZSTD_isError()\\n         - both lower and upper bounds, inclusive\"]\n    pub fn ZSTD_dParam_getBounds(dParam: ZSTD_dParameter) -> ZSTD_bounds;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_setParameter() :\\n  Set one compression parameter, selected by enum ZSTD_dParameter.\\n  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().\\n  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\\n  Setting a parameter is only possible during frame initialization (before starting decompression).\\n @return : 0, or an error code (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_DCtx_setParameter(\n        dctx: *mut ZSTD_DCtx,\n        param: ZSTD_dParameter,\n        value: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_reset() :\\n  Return a DCtx to clean state.\\n  Session and parameters can be reset jointly or separately.\\n  Parameters can only be reset when no active frame is being decompressed.\\n @return : 0, or an error code, which can be tested with ZSTD_isError()\"]\n    pub fn ZSTD_DCtx_reset(\n        dctx: *mut ZSTD_DCtx,\n        reset: ZSTD_ResetDirective,\n    ) -> usize;\n}\n#[doc = \"  Streaming\"]\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_inBuffer_s {\n    #[doc = \"< start of input buffer\"]\n    pub src: *const ::core::ffi::c_void,\n    #[doc = \"< size of input buffer\"]\n    pub size: usize,\n    #[doc = \"< position where reading stopped. Will be updated. 
Necessarily 0 <= pos <= size\"]\n    pub pos: usize,\n}\n#[doc = \"  Streaming\"]\npub type ZSTD_inBuffer = ZSTD_inBuffer_s;\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_outBuffer_s {\n    #[doc = \"< start of output buffer\"]\n    pub dst: *mut ::core::ffi::c_void,\n    #[doc = \"< size of output buffer\"]\n    pub size: usize,\n    #[doc = \"< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size\"]\n    pub pos: usize,\n}\npub type ZSTD_outBuffer = ZSTD_outBuffer_s;\npub type ZSTD_CStream = ZSTD_CCtx;\nextern \"C\" {\n    pub fn ZSTD_createCStream() -> *mut ZSTD_CStream;\n}\nextern \"C\" {\n    pub fn ZSTD_freeCStream(zcs: *mut ZSTD_CStream) -> usize;\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_EndDirective {\n    ZSTD_e_continue = 0,\n    ZSTD_e_flush = 1,\n    ZSTD_e_end = 2,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressStream2() : Requires v1.4.0+\\n  Behaves about the same as ZSTD_compressStream, with additional control on end directive.\\n  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\\n  - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\\n  - output->pos must be <= dstCapacity, input->pos must be <= srcSize\\n  - output->pos and input->pos will be updated. 
They are guaranteed to remain below their respective limit.\\n  - endOp must be a valid directive\\n  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\\n  - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,\\n                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.\\n                                                  The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\\n  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\\n  - @return provides a minimum amount of data remaining to be flushed from internal buffers\\n            or an error code, which can be tested using ZSTD_isError().\\n            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\\n            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.\\n            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\\n  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\\n            only ZSTD_e_end or ZSTD_e_flush operations are allowed.\\n            Before starting a new compression job, or changing compression parameters,\\n            it is required to fully flush internal buffers.\\n  - note: if an operation ends with an error, it may leave @cctx in an undefined state.\\n          Therefore, it's UB to invoke ZSTD_compressStream2() of ZSTD_compressStream() on such a state.\\n          In order to be re-employed after an error, a state must be reset,\\n          which can be done explicitly 
(ZSTD_CCtx_reset()),\\n          or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())\"]\n    pub fn ZSTD_compressStream2(\n        cctx: *mut ZSTD_CCtx,\n        output: *mut ZSTD_outBuffer,\n        input: *mut ZSTD_inBuffer,\n        endOp: ZSTD_EndDirective,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_CStreamInSize() -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_CStreamOutSize() -> usize;\n}\nextern \"C\" {\n    #[doc = \" Equivalent to:\\n\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\\n     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\\n\\n Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API\\n to compress with a dictionary.\"]\n    pub fn ZSTD_initCStream(\n        zcs: *mut ZSTD_CStream,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).\\n NOTE: The return value is different. ZSTD_compressStream() returns a hint for\\n the next read size (if non-zero and not an error). 
ZSTD_compressStream2()\\n returns the minimum nb of bytes left to flush (if non-zero and not an error).\"]\n    pub fn ZSTD_compressStream(\n        zcs: *mut ZSTD_CStream,\n        output: *mut ZSTD_outBuffer,\n        input: *mut ZSTD_inBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush).\"]\n    pub fn ZSTD_flushStream(\n        zcs: *mut ZSTD_CStream,\n        output: *mut ZSTD_outBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end).\"]\n    pub fn ZSTD_endStream(\n        zcs: *mut ZSTD_CStream,\n        output: *mut ZSTD_outBuffer,\n    ) -> usize;\n}\npub type ZSTD_DStream = ZSTD_DCtx;\nextern \"C\" {\n    pub fn ZSTD_createDStream() -> *mut ZSTD_DStream;\n}\nextern \"C\" {\n    pub fn ZSTD_freeDStream(zds: *mut ZSTD_DStream) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initDStream() :\\n Initialize/reset DStream state for new decompression operation.\\n Call before new decompression operation using same DStream.\\n\\n Note : This function is redundant with the advanced API and equivalent to:\\n     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\\n     ZSTD_DCtx_refDDict(zds, NULL);\"]\n    pub fn ZSTD_initDStream(zds: *mut ZSTD_DStream) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressStream() :\\n Streaming decompression function.\\n Call repetitively to consume full input updating it as necessary.\\n Function will update both input and output `pos` fields exposing current state via these fields:\\n - `input.pos < input.size`, some input remaining and caller should provide remaining input\\n   on the next call.\\n - `output.pos < output.size`, decoder flushed internal output buffer.\\n - `output.pos == output.size`, unflushed data potentially present in the internal buffers,\\n   check ZSTD_decompressStream() @return value,\\n   if > 0, invoke it again to flush remaining data to 
output.\\n Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.\\n\\n @return : 0 when a frame is completely decoded and fully flushed,\\n           or an error code, which can be tested using ZSTD_isError(),\\n           or any other value > 0, which means there is some decoding or flushing to do to complete current frame.\\n\\n Note: when an operation returns with an error code, the @zds state may be left in undefined state.\\n       It's UB to invoke `ZSTD_decompressStream()` on such a state.\\n       In order to re-use such a state, it must be first reset,\\n       which can be done explicitly (`ZSTD_DCtx_reset()`),\\n       or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)\"]\n    pub fn ZSTD_decompressStream(\n        zds: *mut ZSTD_DStream,\n        output: *mut ZSTD_outBuffer,\n        input: *mut ZSTD_inBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_DStreamInSize() -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_DStreamOutSize() -> usize;\n}\nextern \"C\" {\n    #[doc = \"  Simple dictionary API\\n/\\n/*! 
ZSTD_compress_usingDict() :\\n  Compression at an explicit compression level using a Dictionary.\\n  A dictionary can be any arbitrary data segment (also called a prefix),\\n  or a buffer with specified information (see zdict.h).\\n  Note : This function loads the dictionary, resulting in significant startup delay.\\n         It's intended for a dictionary used only once.\\n  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used.\"]\n    pub fn ZSTD_compress_usingDict(\n        ctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompress_usingDict() :\\n  Decompression using a known Dictionary.\\n  Dictionary must be identical to the one used during compression.\\n  Note : This function loads the dictionary, resulting in significant startup delay.\\n         It's intended for a dictionary used only once.\\n  Note : When `dict == NULL || dictSize < 8` no dictionary is used.\"]\n    pub fn ZSTD_decompress_usingDict(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_CDict_s {\n    _unused: [u8; 0],\n}\n#[doc = \"  Bulk processing dictionary API\"]\npub type ZSTD_CDict = ZSTD_CDict_s;\nextern \"C\" {\n    #[doc = \" ZSTD_createCDict() :\\n  When compressing multiple messages or blocks using the same dictionary,\\n  it's recommended to digest the dictionary only once, since it's a costly operation.\\n  ZSTD_createCDict() will create a state from digesting a dictionary.\\n  The resulting state can be used for future 
compression operations with very limited startup cost.\\n  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.\\n @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.\\n  Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.\\n  Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,\\n      in which case the only thing that it transports is the @compressionLevel.\\n      This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,\\n      expecting a ZSTD_CDict parameter with any data, including those without a known dictionary.\"]\n    pub fn ZSTD_createCDict(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> *mut ZSTD_CDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_freeCDict() :\\n  Function frees memory allocated by ZSTD_createCDict().\\n  If a NULL pointer is passed, no operation is performed.\"]\n    pub fn ZSTD_freeCDict(CDict: *mut ZSTD_CDict) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compress_usingCDict() :\\n  Compression using a digested Dictionary.\\n  Recommended when same dictionary is used multiple times.\\n  Note : compression level is _decided at dictionary creation time_,\\n     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)\"]\n    pub fn ZSTD_compress_usingCDict(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        cdict: *const ZSTD_CDict,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_DDict_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_DDict = ZSTD_DDict_s;\nextern \"C\" {\n    #[doc = \" ZSTD_createDDict() :\\n  Create a digested dictionary, ready to start decompression 
operation without startup delay.\\n  dictBuffer can be released after DDict creation, as its content is copied inside DDict.\"]\n    pub fn ZSTD_createDDict(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> *mut ZSTD_DDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_freeDDict() :\\n  Function frees memory allocated with ZSTD_createDDict()\\n  If a NULL pointer is passed, no operation is performed.\"]\n    pub fn ZSTD_freeDDict(ddict: *mut ZSTD_DDict) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompress_usingDDict() :\\n  Decompression using a digested Dictionary.\\n  Recommended when same dictionary is used multiple times.\"]\n    pub fn ZSTD_decompress_usingDDict(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        ddict: *const ZSTD_DDict,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromDict() : Requires v1.4.0+\\n  Provides the dictID stored within dictionary.\\n  if @return == 0, the dictionary is not conformant with Zstandard specification.\\n  It can still be loaded, but as a content-only dictionary.\"]\n    pub fn ZSTD_getDictID_fromDict(\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromCDict() : Requires v1.5.0+\\n  Provides the dictID of the dictionary loaded into `cdict`.\\n  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\\n  Non-conformant dictionaries can still be loaded, but as content-only dictionaries.\"]\n    pub fn ZSTD_getDictID_fromCDict(\n        cdict: *const ZSTD_CDict,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromDDict() : Requires v1.4.0+\\n  Provides the dictID of the dictionary loaded into `ddict`.\\n  If @return == 0, the dictionary is not conformant to Zstandard 
specification, or empty.\\n  Non-conformant dictionaries can still be loaded, but as content-only dictionaries.\"]\n    pub fn ZSTD_getDictID_fromDDict(\n        ddict: *const ZSTD_DDict,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromFrame() : Requires v1.4.0+\\n  Provides the dictID required to decompressed the frame stored within `src`.\\n  If @return == 0, the dictID could not be decoded.\\n  This could for one of the following reasons :\\n  - The frame does not require a dictionary to be decoded (most common case).\\n  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information.\\n    Note : this use case also happens when using a non-conformant dictionary.\\n  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).\\n  - This is not a Zstandard frame.\\n  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code.\"]\n    pub fn ZSTD_getDictID_fromFrame(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_loadDictionary() : Requires v1.4.0+\\n  Create an internal CDict from `dict` buffer.\\n  Decompression will have to use same dictionary.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,\\n           meaning \\\"return to no-dictionary mode\\\".\\n  Note 1 : Dictionary is sticky, it will be used for all future compressed frames,\\n           until parameters are reset, a new dictionary is loaded, or the dictionary\\n           is explicitly invalidated by loading a NULL dictionary.\\n  Note 2 : Loading a dictionary involves building tables.\\n           It's also a CPU consuming operation, with non-negligible 
impact on latency.\\n           Tables are dependent on compression parameters, and for this reason,\\n           compression parameters can no longer be changed after loading a dictionary.\\n  Note 3 :`dict` content will be copied internally.\\n           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.\\n           In such a case, dictionary buffer must outlive its users.\\n  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()\\n           to precisely select how dictionary content must be interpreted.\\n  Note 5 : This method does not benefit from LDM (long distance mode).\\n           If you want to employ LDM on some large dictionary content,\\n           prefer employing ZSTD_CCtx_refPrefix() described below.\"]\n    pub fn ZSTD_CCtx_loadDictionary(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_refCDict() : Requires v1.4.0+\\n  Reference a prepared dictionary, to be used for all future compressed frames.\\n  Note that compression parameters are enforced from within CDict,\\n  and supersede any compression parameter previously set within CCtx.\\n  The parameters ignored are labelled as \\\"superseded-by-cdict\\\" in the ZSTD_cParameter enum docs.\\n  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.\\n  The dictionary will remain valid for future compressed frames using same CCtx.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special : Referencing a NULL CDict means \\\"return to no-dictionary mode\\\".\\n  Note 1 : Currently, only one dictionary can be managed.\\n           Referencing a new dictionary effectively \\\"discards\\\" any previous one.\\n  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx.\"]\n    pub fn ZSTD_CCtx_refCDict(\n        cctx: *mut ZSTD_CCtx,\n        cdict: *const ZSTD_CDict,\n    ) 
-> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_refPrefix() : Requires v1.4.0+\\n  Reference a prefix (single-usage dictionary) for next compressed frame.\\n  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).\\n  Decompression will need same prefix to properly regenerate data.\\n  Compressing with a prefix is similar in outcome as performing a diff and compressing it,\\n  but performs much faster, especially during decompression (compression speed is tunable with compression level).\\n  This method is compatible with LDM (long distance mode).\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary\\n  Note 1 : Prefix buffer is referenced. It **must** outlive compression.\\n           Its content must remain unmodified during compression.\\n  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,\\n           ensure that the window size is large enough to contain the entire source.\\n           See ZSTD_c_windowLog.\\n  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.\\n           It's a CPU consuming operation, with non-negligible impact on latency.\\n           If there is a need to use the same prefix multiple times, consider loadDictionary instead.\\n  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).\\n           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation.\"]\n    pub fn ZSTD_CCtx_refPrefix(\n        cctx: *mut ZSTD_CCtx,\n        prefix: *const ::core::ffi::c_void,\n        prefixSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_loadDictionary() : Requires v1.4.0+\\n  Create an internal DDict from dict buffer, to be used to decompress all future frames.\\n  The dictionary remains valid for all future frames, until 
explicitly invalidated, or\\n  a new dictionary is loaded.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,\\n            meaning \\\"return to no-dictionary mode\\\".\\n  Note 1 : Loading a dictionary involves building tables,\\n           which has a non-negligible impact on CPU usage and latency.\\n           It's recommended to \\\"load once, use many times\\\", to amortize the cost\\n  Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.\\n           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.\\n  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of\\n           how dictionary content is loaded and interpreted.\"]\n    pub fn ZSTD_DCtx_loadDictionary(\n        dctx: *mut ZSTD_DCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_refDDict() : Requires v1.4.0+\\n  Reference a prepared dictionary, to be used to decompress next frames.\\n  The dictionary remains active for decompression of future frames using same DCtx.\\n\\n  If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function\\n  will store the DDict references in a table, and the DDict used for decompression\\n  will be determined at decompression time, as per the dict ID in the frame.\\n  The memory for the table is allocated on the first call to refDDict, and can be\\n  freed with ZSTD_freeDCtx().\\n\\n  If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary\\n  will be managed, and referencing a dictionary effectively \\\"discards\\\" any previous one.\\n\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special: referencing a NULL DDict means \\\"return to no-dictionary mode\\\".\\n  Note 2 : DDict is just referenced, its lifetime must 
outlive its usage from DCtx.\"]\n    pub fn ZSTD_DCtx_refDDict(\n        dctx: *mut ZSTD_DCtx,\n        ddict: *const ZSTD_DDict,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_refPrefix() : Requires v1.4.0+\\n  Reference a prefix (single-usage dictionary) to decompress next frame.\\n  This is the reverse operation of ZSTD_CCtx_refPrefix(),\\n  and must use the same prefix as the one used during compression.\\n  Prefix is **only used once**. Reference is discarded at end of frame.\\n  End of frame is reached when ZSTD_decompressStream() returns 0.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary\\n  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.\\n           Prefix buffer must remain unmodified up to the end of frame,\\n           reached when ZSTD_decompressStream() returns 0.\\n  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).\\n           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)\\n  Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.\\n           A full dictionary is more costly, as it requires building tables.\"]\n    pub fn ZSTD_DCtx_refPrefix(\n        dctx: *mut ZSTD_DCtx,\n        prefix: *const ::core::ffi::c_void,\n        prefixSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_sizeof_*() : Requires v1.4.0+\\n  These functions give the _current_ memory usage of selected object.\\n  Note that object memory usage can evolve (increase or decrease) over time.\"]\n    pub fn ZSTD_sizeof_CCtx(cctx: *const ZSTD_CCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_DCtx(dctx: *const ZSTD_DCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_CStream(zcs: *const ZSTD_CStream) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_DStream(zds: *const ZSTD_DStream) -> 
usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_CDict(cdict: *const ZSTD_CDict) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_DDict(ddict: *const ZSTD_DDict) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_CCtx_params_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_CCtx_params = ZSTD_CCtx_params_s;\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_Sequence {\n    pub offset: ::core::ffi::c_uint,\n    pub litLength: ::core::ffi::c_uint,\n    pub matchLength: ::core::ffi::c_uint,\n    pub rep: ::core::ffi::c_uint,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_compressionParameters {\n    #[doc = \"< largest match distance : larger == more compression, more memory needed during decompression\"]\n    pub windowLog: ::core::ffi::c_uint,\n    #[doc = \"< fully searched segment : larger == more compression, slower, more memory (useless for fast)\"]\n    pub chainLog: ::core::ffi::c_uint,\n    #[doc = \"< dispatch table : larger == faster, more memory\"]\n    pub hashLog: ::core::ffi::c_uint,\n    #[doc = \"< nb of searches : larger == more compression, slower\"]\n    pub searchLog: ::core::ffi::c_uint,\n    #[doc = \"< match length searched : larger == faster decompression, sometimes less compression\"]\n    pub minMatch: ::core::ffi::c_uint,\n    #[doc = \"< acceptable match size for optimal parser (only) : larger == more compression, slower\"]\n    pub targetLength: ::core::ffi::c_uint,\n    #[doc = \"< see ZSTD_strategy definition above\"]\n    pub strategy: ZSTD_strategy,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_frameParameters {\n    #[doc = \"< 1: content size will be in frame header (when known)\"]\n    pub contentSizeFlag: ::core::ffi::c_int,\n    #[doc = \"< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection\"]\n    pub checksumFlag: ::core::ffi::c_int,\n    #[doc = \"< 1: no dictID will be saved into frame header (dictID is only useful for 
dictionary compression)\"]\n    pub noDictIDFlag: ::core::ffi::c_int,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_parameters {\n    pub cParams: ZSTD_compressionParameters,\n    pub fParams: ZSTD_frameParameters,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_dictContentType_e {\n    ZSTD_dct_auto = 0,\n    ZSTD_dct_rawContent = 1,\n    ZSTD_dct_fullDict = 2,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_dictLoadMethod_e {\n    #[doc = \"< Copy dictionary content internally\"]\n    ZSTD_dlm_byCopy = 0,\n    #[doc = \"< Reference dictionary content -- the dictionary buffer must outlive its users.\"]\n    ZSTD_dlm_byRef = 1,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_format_e {\n    ZSTD_f_zstd1 = 0,\n    ZSTD_f_zstd1_magicless = 1,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_forceIgnoreChecksum_e {\n    ZSTD_d_validateChecksum = 0,\n    ZSTD_d_ignoreChecksum = 1,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_refMultipleDDicts_e {\n    ZSTD_rmd_refSingleDDict = 0,\n    ZSTD_rmd_refMultipleDDicts = 1,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_dictAttachPref_e {\n    ZSTD_dictDefaultAttach = 0,\n    ZSTD_dictForceAttach = 1,\n    ZSTD_dictForceCopy = 2,\n    ZSTD_dictForceLoad = 3,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_literalCompressionMode_e {\n    #[doc = \"< Automatically determine the compression mode based on the compression level.\\n   Negative compression levels will be uncompressed, and positive compression\\n   levels will be compressed.\"]\n    ZSTD_lcm_auto = 0,\n    #[doc = \"< Always attempt Huffman compression. 
Uncompressed literals will still be\\n   emitted if Huffman compression is not profitable.\"]\n    ZSTD_lcm_huffman = 1,\n    #[doc = \"< Always emit uncompressed literals.\"]\n    ZSTD_lcm_uncompressed = 2,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_ParamSwitch_e {\n    ZSTD_ps_auto = 0,\n    ZSTD_ps_enable = 1,\n    ZSTD_ps_disable = 2,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_findDecompressedSize() :\\n  `src` should point to the start of a series of ZSTD encoded and/or skippable frames\\n  `srcSize` must be the _exact_ size of this series\\n       (i.e. there should be a frame boundary at `src + srcSize`)\\n  @return : - decompressed size of all data in all successive frames\\n            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN\\n            - if an error occurred: ZSTD_CONTENTSIZE_ERROR\\n\\n   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.\\n            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.\\n            In which case, it's necessary to use streaming mode to decompress data.\\n   note 2 : decompressed size is always present when compression is done with ZSTD_compress()\\n   note 3 : decompressed size can be very large (64-bits value),\\n            potentially larger than what local system can handle as a single memory segment.\\n            In which case, it's necessary to use streaming mode to decompress data.\\n   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.\\n            Always ensure result fits within application's authorized limits.\\n            Each application can set its own limits.\\n   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to\\n            read each contained frame header.  
This is fast as most of the data is skipped,\\n            however it does mean that all frame data must be present and valid.\"]\n    pub fn ZSTD_findDecompressedSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressBound() :\\n  `src` should point to the start of a series of ZSTD encoded and/or skippable frames\\n  `srcSize` must be the _exact_ size of this series\\n       (i.e. there should be a frame boundary at `src + srcSize`)\\n  @return : - upper-bound for the decompressed size of all data in all successive frames\\n            - if an error occurred: ZSTD_CONTENTSIZE_ERROR\\n\\n  note 1  : an error can occur if `src` contains an invalid or incorrectly formatted frame.\\n  note 2  : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.\\n            in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.\\n  note 3  : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:\\n              upper-bound = # blocks * min(128 KB, Window_Size)\"]\n    pub fn ZSTD_decompressBound(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_frameHeaderSize() :\\n  srcSize must be large enough, aka >= ZSTD_FRAMEHEADERSIZE_PREFIX.\\n @return : size of the Frame Header,\\n           or an error code (if srcSize is too small)\"]\n    pub fn ZSTD_frameHeaderSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_FrameType_e {\n    ZSTD_frame = 0,\n    ZSTD_skippableFrame = 1,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_FrameHeader {\n    pub frameContentSize: ::core::ffi::c_ulonglong,\n    pub windowSize: 
::core::ffi::c_ulonglong,\n    pub blockSizeMax: ::core::ffi::c_uint,\n    pub frameType: ZSTD_FrameType_e,\n    pub headerSize: ::core::ffi::c_uint,\n    pub dictID: ::core::ffi::c_uint,\n    pub checksumFlag: ::core::ffi::c_uint,\n    pub _reserved1: ::core::ffi::c_uint,\n    pub _reserved2: ::core::ffi::c_uint,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getFrameHeader() :\\n  decode Frame Header into `zfhPtr`, or requires larger `srcSize`.\\n @return : 0 => header is complete, `zfhPtr` is correctly filled,\\n          >0 => `srcSize` is too small, @return value is the wanted `srcSize` amount, `zfhPtr` is not filled,\\n           or an error code, which can be tested using ZSTD_isError()\"]\n    pub fn ZSTD_getFrameHeader(\n        zfhPtr: *mut ZSTD_FrameHeader,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getFrameHeader_advanced() :\\n  same as ZSTD_getFrameHeader(),\\n  with added capability to select a format (like ZSTD_f_zstd1_magicless)\"]\n    pub fn ZSTD_getFrameHeader_advanced(\n        zfhPtr: *mut ZSTD_FrameHeader,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        format: ZSTD_format_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressionMargin() :\\n Zstd supports in-place decompression, where the input and output buffers overlap.\\n In this case, the output buffer must be at least (Margin + Output_Size) bytes large,\\n and the input buffer must be at the end of the output buffer.\\n\\n  _______________________ Output Buffer ________________________\\n |                                                              |\\n |                                        ____ Input Buffer ____|\\n |                                       |                      |\\n v                                       v                      v\\n |---------------------------------------|-----------|----------|\\n ^                                                 
  ^          ^\\n |___________________ Output_Size ___________________|_ Margin _|\\n\\n NOTE: See also ZSTD_DECOMPRESSION_MARGIN().\\n NOTE: This applies only to single-pass decompression through ZSTD_decompress() or\\n ZSTD_decompressDCtx().\\n NOTE: This function supports multi-frame input.\\n\\n @param src The compressed frame(s)\\n @param srcSize The size of the compressed frame(s)\\n @returns The decompression margin or an error that can be checked with ZSTD_isError().\"]\n    pub fn ZSTD_decompressionMargin(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_SequenceFormat_e {\n    ZSTD_sf_noBlockDelimiters = 0,\n    ZSTD_sf_explicitBlockDelimiters = 1,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_sequenceBound() :\\n `srcSize` : size of the input buffer\\n  @return : upper-bound for the number of sequences that can be generated\\n            from a buffer of srcSize bytes\\n\\n  note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence).\"]\n    pub fn ZSTD_sequenceBound(srcSize: usize) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_generateSequences() :\\n WARNING: This function is meant for debugging and informational purposes ONLY!\\n Its implementation is flawed, and it will be deleted in a future version.\\n It is not guaranteed to succeed, as there are several cases where it will give\\n up and fail. You should NOT use this function in production code.\\n\\n This function is deprecated, and will be removed in a future version.\\n\\n Generate sequences using ZSTD_compress2(), given a source buffer.\\n\\n @param zc The compression context to be used for ZSTD_compress2(). 
Set any\\n           compression parameters you need on this context.\\n @param outSeqs The output sequences buffer of size @p outSeqsSize\\n @param outSeqsCapacity The size of the output sequences buffer.\\n                    ZSTD_sequenceBound(srcSize) is an upper bound on the number\\n                    of sequences that can be generated.\\n @param src The source buffer to generate sequences from of size @p srcSize.\\n @param srcSize The size of the source buffer.\\n\\n Each block will end with a dummy sequence\\n with offset == 0, matchLength == 0, and litLength == length of last literals.\\n litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)\\n simply acts as a block delimiter.\\n\\n @returns The number of sequences generated, necessarily less than\\n          ZSTD_sequenceBound(srcSize), or an error code that can be checked\\n          with ZSTD_isError().\"]\n    pub fn ZSTD_generateSequences(\n        zc: *mut ZSTD_CCtx,\n        outSeqs: *mut ZSTD_Sequence,\n        outSeqsCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_mergeBlockDelimiters() :\\n Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals\\n by merging them into the literals of the next sequence.\\n\\n As such, the final generated result has no explicit representation of block boundaries,\\n and the final last literals segment is not represented in the sequences.\\n\\n The output of this function can be fed into ZSTD_compressSequences() with CCtx\\n setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters\\n @return : number of sequences left after merging\"]\n    pub fn ZSTD_mergeBlockDelimiters(\n        sequences: *mut ZSTD_Sequence,\n        seqsSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressSequences() :\\n Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.\\n @src 
contains the entire input (not just the literals).\\n If @srcSize > sum(sequence.length), the remaining bytes are considered all literals\\n If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.).\\n The entire source is compressed into a single frame.\\n\\n The compression behavior changes based on cctx params. In particular:\\n    If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain\\n    no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on\\n    the block size derived from the cctx, and sequences may be split. This is the default setting.\\n\\n    If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain\\n    valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.\\n\\n    When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes\\n    using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit\\n    can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation.\\n    By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10).\\n    ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction.\\n\\n    If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined\\n    behavior. 
If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for\\n    specifics regarding offset/matchlength requirements) and then bail out and return an error.\\n\\n    In addition to the two adjustable experimental params, there are other important cctx params.\\n    - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.\\n    - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.\\n    - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset\\n      is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md\\n\\n Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused.\\n Dev Note: Once ability to ingest repcodes become available, the explicit block delims mode must respect those repcodes exactly,\\n         and cannot emit an RLE block that disagrees with the repcode history.\\n @return : final compressed size, or a ZSTD error code.\"]\n    pub fn ZSTD_compressSequences(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        inSeqs: *const ZSTD_Sequence,\n        inSeqsSize: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressSequencesAndLiterals() :\\n This is a variant of ZSTD_compressSequences() which,\\n instead of receiving (src,srcSize) as input parameter, receives (literals,litSize),\\n aka all the literals, already extracted and laid out into a single continuous buffer.\\n This can be useful if the process generating the sequences also happens to generate the buffer of literals,\\n thus skipping an extraction + 
caching stage.\\n It's a speed optimization, useful when the right conditions are met,\\n but it also features the following limitations:\\n - Only supports explicit delimiter mode\\n - Currently does not support Sequences validation (so input Sequences are trusted)\\n - Not compatible with frame checksum, which must be disabled\\n - If any block is incompressible, will fail and return an error\\n - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error.\\n - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals.\\n   @litBufCapacity must be at least 8 bytes larger than @litSize.\\n - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error.\\n @return : final compressed size, or a ZSTD error code.\"]\n    pub fn ZSTD_compressSequencesAndLiterals(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        inSeqs: *const ZSTD_Sequence,\n        nbSequences: usize,\n        literals: *const ::core::ffi::c_void,\n        litSize: usize,\n        litBufCapacity: usize,\n        decompressedSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_writeSkippableFrame() :\\n Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.\\n\\n Skippable frames begin with a 4-byte magic number. 
There are 16 possible choices of magic number,\\n ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.\\n As such, the parameter magicVariant controls the exact skippable frame magic number variant used,\\n so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.\\n\\n Returns an error if destination buffer is not large enough, if the source size is not representable\\n with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).\\n\\n @return : number of bytes written or a ZSTD error.\"]\n    pub fn ZSTD_writeSkippableFrame(\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        magicVariant: ::core::ffi::c_uint,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_readSkippableFrame() :\\n Retrieves the content of a zstd skippable frame starting at @src, and writes it to @dst buffer.\\n\\n The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written,\\n i.e. 
magicNumber - ZSTD_MAGIC_SKIPPABLE_START.\\n This can be NULL if the caller is not interested in the magicVariant.\\n\\n Returns an error if destination buffer is not large enough, or if the frame is not skippable.\\n\\n @return : number of bytes written or a ZSTD error.\"]\n    pub fn ZSTD_readSkippableFrame(\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        magicVariant: *mut ::core::ffi::c_uint,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_isSkippableFrame() :\\n  Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.\"]\n    pub fn ZSTD_isSkippableFrame(\n        buffer: *const ::core::ffi::c_void,\n        size: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_estimate*() :\\n  These functions make it possible to estimate memory usage\\n  of a future {D,C}Ctx, before its creation.\\n  This is useful in combination with ZSTD_initStatic(),\\n  which makes it possible to employ a static buffer for ZSTD_CCtx* state.\\n\\n  ZSTD_estimateCCtxSize() will provide a memory budget large enough\\n  to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2()\\n  associated with any compression level up to max specified one.\\n  The estimate will assume the input may be arbitrarily large,\\n  which is the worst case.\\n\\n  Note that the size estimation is specific for one-shot compression,\\n  it is not valid for streaming (see ZSTD_estimateCStreamSize*())\\n  nor other potential ways of using a ZSTD_CCtx* state.\\n\\n  When srcSize can be bound by a known and rather \\\"small\\\" value,\\n  this knowledge can be used to provide a tighter budget estimation\\n  because the ZSTD_CCtx* state will need less memory for small inputs.\\n  This tighter estimation can be provided by employing more advanced functions\\n  ZSTD_estimateCCtxSize_usingCParams(), which can be used 
in tandem with ZSTD_getCParams(),\\n  and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().\\n  Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.\\n\\n  Note : only single-threaded compression is supported.\\n  ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.\"]\n    pub fn ZSTD_estimateCCtxSize(\n        maxCompressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateCCtxSize_usingCParams(\n        cParams: ZSTD_compressionParameters,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateCCtxSize_usingCCtxParams(\n        params: *const ZSTD_CCtx_params,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateDCtxSize() -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_estimateCStreamSize() :\\n  ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression\\n  using any compression level up to the max specified one.\\n  It will also consider src size to be arbitrarily \\\"large\\\", which is a worst case scenario.\\n  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.\\n  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.\\n  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. 
This function will return an error code if ZSTD_c_nbWorkers is >= 1.\\n  Note : CStream size estimation is only correct for single-threaded compression.\\n  ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.\\n  Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.\\n  Size estimates assume that no external sequence producer is registered.\\n\\n  ZSTD_DStream memory budget depends on frame's window Size.\\n  This information can be passed manually, using ZSTD_estimateDStreamSize,\\n  or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();\\n  Any frame requesting a window size larger than max specified one will be rejected.\\n  Note : if streaming is init with function ZSTD_init?Stream_usingDict(),\\n         an internal ?Dict will be created, which additional size is not estimated here.\\n         In this case, get total size by adding ZSTD_estimate?DictSize\"]\n    pub fn ZSTD_estimateCStreamSize(\n        maxCompressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateCStreamSize_usingCParams(\n        cParams: ZSTD_compressionParameters,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateCStreamSize_usingCCtxParams(\n        params: *const ZSTD_CCtx_params,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateDStreamSize(maxWindowSize: usize) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateDStreamSize_fromFrame(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_estimate?DictSize() :\\n  ZSTD_estimateCDictSize() will bet that src size is relatively \\\"small\\\", and content is copied, like ZSTD_createCDict().\\n  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().\\n  Note : dictionaries created by reference 
(`ZSTD_dlm_byRef`) are logically smaller.\"]\n    pub fn ZSTD_estimateCDictSize(\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateCDictSize_advanced(\n        dictSize: usize,\n        cParams: ZSTD_compressionParameters,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateDDictSize(\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initStatic*() :\\n  Initialize an object using a pre-allocated fixed-size buffer.\\n  workspace: The memory area to emplace the object into.\\n             Provided pointer *must be 8-bytes aligned*.\\n             Buffer must outlive object.\\n  workspaceSize: Use ZSTD_estimate*Size() to determine\\n                 how large workspace must be to support target scenario.\\n @return : pointer to object (same address as workspace, just different type),\\n           or NULL if error (size too small, incorrect alignment, etc.)\\n  Note : zstd will never resize nor malloc() when using a static buffer.\\n         If the object requires more memory than available,\\n         zstd will just error out (typically ZSTD_error_memory_allocation).\\n  Note 2 : there is no corresponding \\\"free\\\" function.\\n           Since workspace is allocated externally, it must be freed externally too.\\n  Note 3 : cParams : use ZSTD_getCParams() to convert a compression level\\n           into its associated cParams.\\n  Limitation 1 : currently not compatible with internal dictionary creation, triggered by\\n                 ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().\\n  Limitation 2 : static cctx currently not compatible with multi-threading.\\n  Limitation 3 : static dctx is incompatible with legacy support.\"]\n    pub fn ZSTD_initStaticCCtx(\n        workspace: *mut 
::core::ffi::c_void,\n        workspaceSize: usize,\n    ) -> *mut ZSTD_CCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_initStaticCStream(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n    ) -> *mut ZSTD_CStream;\n}\nextern \"C\" {\n    pub fn ZSTD_initStaticDCtx(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n    ) -> *mut ZSTD_DCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_initStaticDStream(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n    ) -> *mut ZSTD_DStream;\n}\nextern \"C\" {\n    pub fn ZSTD_initStaticCDict(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n        cParams: ZSTD_compressionParameters,\n    ) -> *const ZSTD_CDict;\n}\nextern \"C\" {\n    pub fn ZSTD_initStaticDDict(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n    ) -> *const ZSTD_DDict;\n}\n#[doc = \" Custom memory allocation :\\n  These prototypes make it possible to pass your own allocation/free functions.\\n  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.\\n  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.\"]\npub type ZSTD_allocFunction = ::core::option::Option<\n    unsafe extern \"C\" fn(\n        opaque: *mut ::core::ffi::c_void,\n        size: usize,\n    ) -> *mut ::core::ffi::c_void,\n>;\npub type ZSTD_freeFunction = ::core::option::Option<\n    unsafe extern \"C\" fn(\n        opaque: *mut ::core::ffi::c_void,\n        address: *mut ::core::ffi::c_void,\n    ),\n>;\n#[repr(C)]\n#[derive(Debug, 
Copy, Clone)]\npub struct ZSTD_customMem {\n    pub customAlloc: ZSTD_allocFunction,\n    pub customFree: ZSTD_freeFunction,\n    pub opaque: *mut ::core::ffi::c_void,\n}\nextern \"C\" {\n    #[doc = \"< this constant defers to stdlib's functions\"]\n    pub static ZSTD_defaultCMem: ZSTD_customMem;\n}\nextern \"C\" {\n    pub fn ZSTD_createCCtx_advanced(\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_CCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_createCStream_advanced(\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_CStream;\n}\nextern \"C\" {\n    pub fn ZSTD_createDCtx_advanced(\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_DCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_createDStream_advanced(\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_DStream;\n}\nextern \"C\" {\n    pub fn ZSTD_createCDict_advanced(\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n        cParams: ZSTD_compressionParameters,\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_CDict;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct POOL_ctx_s {\n    _unused: [u8; 0],\n}\n#[doc = \" Thread pool :\\n  These prototypes make it possible to share a thread pool among multiple compression contexts.\\n  This can limit resources for applications with multiple threads where each one uses\\n  a threaded compression mode (via ZSTD_c_nbWorkers parameter).\\n  ZSTD_createThreadPool creates a new thread pool with a given number of threads.\\n  Note that the lifetime of such pool must exist while being used.\\n  ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value\\n  to use an internal thread pool).\\n  ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.\"]\npub type ZSTD_threadPool = POOL_ctx_s;\nextern \"C\" {\n    pub fn ZSTD_createThreadPool(numThreads: usize) -> *mut ZSTD_threadPool;\n}\nextern \"C\" {\n 
   pub fn ZSTD_freeThreadPool(pool: *mut ZSTD_threadPool);\n}\nextern \"C\" {\n    pub fn ZSTD_CCtx_refThreadPool(\n        cctx: *mut ZSTD_CCtx,\n        pool: *mut ZSTD_threadPool,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_createCDict_advanced2(\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n        cctxParams: *const ZSTD_CCtx_params,\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_CDict;\n}\nextern \"C\" {\n    pub fn ZSTD_createDDict_advanced(\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_DDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_createCDict_byReference() :\\n  Create a digested dictionary for compression\\n  Dictionary content is just referenced, not duplicated.\\n  As a consequence, `dictBuffer` **must** outlive CDict,\\n  and its content must remain unmodified throughout the lifetime of CDict.\\n  note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef\"]\n    pub fn ZSTD_createCDict_byReference(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> *mut ZSTD_CDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getCParams() :\\n @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.\\n `estimatedSrcSize` value is optional, select 0 if not known\"]\n    pub fn ZSTD_getCParams(\n        compressionLevel: ::core::ffi::c_int,\n        estimatedSrcSize: ::core::ffi::c_ulonglong,\n        dictSize: usize,\n    ) -> ZSTD_compressionParameters;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getParams() :\\n  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component 
`ZSTD_compressionParameters`.\\n  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0\"]\n    pub fn ZSTD_getParams(\n        compressionLevel: ::core::ffi::c_int,\n        estimatedSrcSize: ::core::ffi::c_ulonglong,\n        dictSize: usize,\n    ) -> ZSTD_parameters;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_checkCParams() :\\n  Ensure param values remain within authorized range.\\n @return 0 on success, or an error code (can be checked with ZSTD_isError())\"]\n    pub fn ZSTD_checkCParams(params: ZSTD_compressionParameters) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_adjustCParams() :\\n  optimize params for a given `srcSize` and `dictSize`.\\n `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.\\n `dictSize` must be `0` when there is no dictionary.\\n  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.\\n  This function never fails (wide contract)\"]\n    pub fn ZSTD_adjustCParams(\n        cPar: ZSTD_compressionParameters,\n        srcSize: ::core::ffi::c_ulonglong,\n        dictSize: usize,\n    ) -> ZSTD_compressionParameters;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setCParams() :\\n  Set all parameters provided within @p cparams into the working @p cctx.\\n  Note : if modifying parameters during compression (MT mode only),\\n         note that changes to the .windowLog parameter will be ignored.\\n @return 0 on success, or an error code (can be checked with ZSTD_isError()).\\n         On failure, no parameters are updated.\"]\n    pub fn ZSTD_CCtx_setCParams(\n        cctx: *mut ZSTD_CCtx,\n        cparams: ZSTD_compressionParameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setFParams() :\\n  Set all parameters provided within @p fparams into the working @p cctx.\\n @return 0 on success, or an error code (can be checked with ZSTD_isError()).\"]\n    pub fn ZSTD_CCtx_setFParams(\n        cctx: *mut ZSTD_CCtx,\n        
fparams: ZSTD_frameParameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setParams() :\\n  Set all parameters provided within @p params into the working @p cctx.\\n @return 0 on success, or an error code (can be checked with ZSTD_isError()).\"]\n    pub fn ZSTD_CCtx_setParams(\n        cctx: *mut ZSTD_CCtx,\n        params: ZSTD_parameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compress_advanced() :\\n  Note : this function is now DEPRECATED.\\n         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.\\n  This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_compress_advanced(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        params: ZSTD_parameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compress_usingCDict_advanced() :\\n  Note : this function is now DEPRECATED.\\n         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.\\n  This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_compress_usingCDict_advanced(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        cdict: *const ZSTD_CDict,\n        fParams: ZSTD_frameParameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_loadDictionary_byReference() :\\n  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.\\n  It saves some memory, but also requires that `dict` outlives its usage within `cctx`\"]\n    pub fn ZSTD_CCtx_loadDictionary_byReference(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const 
::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_loadDictionary_advanced() :\\n  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over\\n  how to load the dictionary (by copy ? by reference ?)\\n  and how to interpret it (automatic ? force raw mode ? full mode only ?)\"]\n    pub fn ZSTD_CCtx_loadDictionary_advanced(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_refPrefix_advanced() :\\n  Same as ZSTD_CCtx_refPrefix(), but gives finer control over\\n  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)\"]\n    pub fn ZSTD_CCtx_refPrefix_advanced(\n        cctx: *mut ZSTD_CCtx,\n        prefix: *const ::core::ffi::c_void,\n        prefixSize: usize,\n        dictContentType: ZSTD_dictContentType_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_getParameter() :\\n  Get the requested compression parameter value, selected by enum ZSTD_cParameter,\\n  and store it into int* value.\\n @return : 0, or an error code (which can be tested with ZSTD_isError()).\"]\n    pub fn ZSTD_CCtx_getParameter(\n        cctx: *const ZSTD_CCtx,\n        param: ZSTD_cParameter,\n        value: *mut ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_params :\\n  Quick howto :\\n  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure\\n  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into\\n                                     an existing ZSTD_CCtx_params structure.\\n                                     This is similar to\\n                                     ZSTD_CCtx_setParameter().\\n  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to\\n                                    an existing 
CCtx.\\n                                    These parameters will be applied to\\n                                    all subsequent frames.\\n  - ZSTD_compressStream2() : Do compression using the CCtx.\\n  - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.\\n\\n  This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()\\n  for static allocation of CCtx for single-threaded compression.\"]\n    pub fn ZSTD_createCCtxParams() -> *mut ZSTD_CCtx_params;\n}\nextern \"C\" {\n    pub fn ZSTD_freeCCtxParams(params: *mut ZSTD_CCtx_params) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_reset() :\\n  Reset params to default values.\"]\n    pub fn ZSTD_CCtxParams_reset(params: *mut ZSTD_CCtx_params) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_init() :\\n  Initializes the compression parameters of cctxParams according to\\n  compression level. All other parameters are reset to their default values.\"]\n    pub fn ZSTD_CCtxParams_init(\n        cctxParams: *mut ZSTD_CCtx_params,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_init_advanced() :\\n  Initializes the compression and frame parameters of cctxParams according to\\n  params. 
All other parameters are reset to their default values.\"]\n    pub fn ZSTD_CCtxParams_init_advanced(\n        cctxParams: *mut ZSTD_CCtx_params,\n        params: ZSTD_parameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_setParameter() : Requires v1.4.0+\\n  Similar to ZSTD_CCtx_setParameter.\\n  Set one compression parameter, selected by enum ZSTD_cParameter.\\n  Parameters must be applied to a ZSTD_CCtx using\\n  ZSTD_CCtx_setParametersUsingCCtxParams().\\n @result : a code representing success or failure (which can be tested with\\n           ZSTD_isError()).\"]\n    pub fn ZSTD_CCtxParams_setParameter(\n        params: *mut ZSTD_CCtx_params,\n        param: ZSTD_cParameter,\n        value: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_getParameter() :\\n Similar to ZSTD_CCtx_getParameter.\\n Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\"]\n    pub fn ZSTD_CCtxParams_getParameter(\n        params: *const ZSTD_CCtx_params,\n        param: ZSTD_cParameter,\n        value: *mut ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setParametersUsingCCtxParams() :\\n  Apply a set of ZSTD_CCtx_params to the compression context.\\n  This can be done even after compression is started,\\n    if nbWorkers==0, this will have no impact until a new compression is started.\\n    if nbWorkers>=1, new parameters will be picked up at next job,\\n       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).\"]\n    pub fn ZSTD_CCtx_setParametersUsingCCtxParams(\n        cctx: *mut ZSTD_CCtx,\n        params: *const ZSTD_CCtx_params,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressStream2_simpleArgs() :\\n  Same as ZSTD_compressStream2(),\\n  but using only integral types as arguments.\\n  This variant 
might be helpful for binders from dynamic languages\\n  which have troubles handling structures containing memory pointers.\"]\n    pub fn ZSTD_compressStream2_simpleArgs(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        dstPos: *mut usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        srcPos: *mut usize,\n        endOp: ZSTD_EndDirective,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_isFrame() :\\n  Tells if the content of `buffer` starts with a valid Frame Identifier.\\n  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.\\n  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.\\n  Note 3 : Skippable Frame Identifiers are considered valid.\"]\n    pub fn ZSTD_isFrame(\n        buffer: *const ::core::ffi::c_void,\n        size: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_createDDict_byReference() :\\n  Create a digested dictionary, ready to start decompression operation without startup delay.\\n  Dictionary content is referenced, and therefore stays in dictBuffer.\\n  It is important that dictBuffer outlives DDict,\\n  it must remain read accessible throughout the lifetime of DDict\"]\n    pub fn ZSTD_createDDict_byReference(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> *mut ZSTD_DDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_loadDictionary_byReference() :\\n  Same as ZSTD_DCtx_loadDictionary(),\\n  but references `dict` content instead of copying it into `dctx`.\\n  This saves memory if `dict` remains around.,\\n  However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression.\"]\n    pub fn ZSTD_DCtx_loadDictionary_byReference(\n        dctx: *mut ZSTD_DCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n  
  #[doc = \" ZSTD_DCtx_loadDictionary_advanced() :\\n  Same as ZSTD_DCtx_loadDictionary(),\\n  but gives direct control over\\n  how to load the dictionary (by copy ? by reference ?)\\n  and how to interpret it (automatic ? force raw mode ? full mode only ?).\"]\n    pub fn ZSTD_DCtx_loadDictionary_advanced(\n        dctx: *mut ZSTD_DCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_refPrefix_advanced() :\\n  Same as ZSTD_DCtx_refPrefix(), but gives finer control over\\n  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)\"]\n    pub fn ZSTD_DCtx_refPrefix_advanced(\n        dctx: *mut ZSTD_DCtx,\n        prefix: *const ::core::ffi::c_void,\n        prefixSize: usize,\n        dictContentType: ZSTD_dictContentType_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_setMaxWindowSize() :\\n  Refuses allocating internal buffers for frames requiring a window size larger than provided limit.\\n  This protects a decoder context from reserving too much memory for itself (potential attack scenario).\\n  This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.\\n  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)\\n @return : 0, or an error code (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_DCtx_setMaxWindowSize(\n        dctx: *mut ZSTD_DCtx,\n        maxWindowSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_getParameter() :\\n  Get the requested decompression parameter value, selected by enum ZSTD_dParameter,\\n  and store it into int* value.\\n @return : 0, or an error code (which can be tested with ZSTD_isError()).\"]\n    pub fn ZSTD_DCtx_getParameter(\n        dctx: *mut ZSTD_DCtx,\n        
param: ZSTD_dParameter,\n        value: *mut ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_setFormat() :\\n  This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter().\\n  Instruct the decoder context about what kind of data to decode next.\\n  This instruction is mandatory to decode data without a fully-formed header,\\n  such ZSTD_f_zstd1_magicless for example.\\n @return : 0, or an error code (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_DCtx_setFormat(\n        dctx: *mut ZSTD_DCtx,\n        format: ZSTD_format_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressStream_simpleArgs() :\\n  Same as ZSTD_decompressStream(),\\n  but using only integral types as arguments.\\n  This can be helpful for binders from dynamic languages\\n  which have troubles handling structures containing memory pointers.\"]\n    pub fn ZSTD_decompressStream_simpleArgs(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        dstPos: *mut usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        srcPos: *mut usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initCStream_srcSize() :\\n This function is DEPRECATED, and equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\\n     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\\n     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\\n\\n pledgedSrcSize must be correct. If it is not known at init time, use\\n ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,\\n \\\"0\\\" also disables frame content size field. 
It may be enabled in the future.\\n This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_initCStream_srcSize(\n        zcs: *mut ZSTD_CStream,\n        compressionLevel: ::core::ffi::c_int,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initCStream_usingDict() :\\n This function is DEPRECATED, and is equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\\n     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);\\n\\n Creates of an internal CDict (incompatible with static CCtx), except if\\n dict == NULL or dictSize < 8, in which case no dict is used.\\n Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if\\n it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.\\n This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_initCStream_usingDict(\n        zcs: *mut ZSTD_CStream,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initCStream_advanced() :\\n This function is DEPRECATED, and is equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_setParams(zcs, params);\\n     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\\n     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);\\n\\n dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.\\n pledgedSrcSize must be correct.\\n If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.\\n This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_initCStream_advanced(\n        zcs: *mut ZSTD_CStream,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        params: ZSTD_parameters,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" 
ZSTD_initCStream_usingCDict() :\\n This function is DEPRECATED, and equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_refCDict(zcs, cdict);\\n\\n note : cdict will just be referenced, and must outlive compression session\\n This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_initCStream_usingCDict(\n        zcs: *mut ZSTD_CStream,\n        cdict: *const ZSTD_CDict,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initCStream_usingCDict_advanced() :\\n   This function is DEPRECATED, and is equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_setFParams(zcs, fParams);\\n     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\\n     ZSTD_CCtx_refCDict(zcs, cdict);\\n\\n same as ZSTD_initCStream_usingCDict(), with control over frame parameters.\\n pledgedSrcSize must be correct. If srcSize is not known at init time, use\\n value ZSTD_CONTENTSIZE_UNKNOWN.\\n This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_initCStream_usingCDict_advanced(\n        zcs: *mut ZSTD_CStream,\n        cdict: *const ZSTD_CDict,\n        fParams: ZSTD_frameParameters,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_resetCStream() :\\n This function is DEPRECATED, and is equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\\n Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but\\n       ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be\\n       explicitly specified.\\n\\n  start a new frame, using same parameters from previous frame.\\n  This is typically useful to skip dictionary loading stage, since it will reuse it in-place.\\n  Note that zcs must be init at least once before using ZSTD_resetCStream().\\n  If pledgedSrcSize is not known at reset time, use macro 
ZSTD_CONTENTSIZE_UNKNOWN.\\n  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.\\n  For the time being, pledgedSrcSize==0 is interpreted as \\\"srcSize unknown\\\" for compatibility with older programs,\\n  but it will change to mean \\\"empty\\\" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.\\n @return : 0, or an error code (which can be tested using ZSTD_isError())\\n  This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_resetCStream(\n        zcs: *mut ZSTD_CStream,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_frameProgression {\n    pub ingested: ::core::ffi::c_ulonglong,\n    pub consumed: ::core::ffi::c_ulonglong,\n    pub produced: ::core::ffi::c_ulonglong,\n    pub flushed: ::core::ffi::c_ulonglong,\n    pub currentJobID: ::core::ffi::c_uint,\n    pub nbActiveWorkers: ::core::ffi::c_uint,\n}\nextern \"C\" {\n    pub fn ZSTD_getFrameProgression(\n        cctx: *const ZSTD_CCtx,\n    ) -> ZSTD_frameProgression;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_toFlushNow() :\\n  Tell how many bytes are ready to be flushed immediately.\\n  Useful for multithreading scenarios (nbWorkers >= 1).\\n  Probe the oldest active job, defined as oldest job not yet entirely flushed,\\n  and check its output buffer.\\n @return : amount of data stored in oldest job and ready to be flushed immediately.\\n  if @return == 0, it means either :\\n  + there is no active job (could be checked with ZSTD_frameProgression()), or\\n  + oldest job is still actively compressing data,\\n    but everything it has produced has also been flushed so far,\\n    therefore flush speed is limited by production speed of oldest job\\n    irrespective of the speed of concurrent (and newer) jobs.\"]\n    pub fn ZSTD_toFlushNow(cctx: *mut ZSTD_CCtx) -> usize;\n}\nextern \"C\" {\n    #[doc = \" This function is deprecated, and is 
equivalent to:\\n\\n     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\\n     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);\\n\\n note: no dictionary will be used if dict == NULL or dictSize < 8\"]\n    pub fn ZSTD_initDStream_usingDict(\n        zds: *mut ZSTD_DStream,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" This function is deprecated, and is equivalent to:\\n\\n     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\\n     ZSTD_DCtx_refDDict(zds, ddict);\\n\\n note : ddict is referenced, it must outlive decompression session\"]\n    pub fn ZSTD_initDStream_usingDDict(\n        zds: *mut ZSTD_DStream,\n        ddict: *const ZSTD_DDict,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" This function is deprecated, and is equivalent to:\\n\\n     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\\n\\n reuse decompression parameters from previous init; saves dictionary loading\"]\n    pub fn ZSTD_resetDStream(zds: *mut ZSTD_DStream) -> usize;\n}\npub type ZSTD_sequenceProducer_F = ::core::option::Option<\n    unsafe extern \"C\" fn(\n        sequenceProducerState: *mut ::core::ffi::c_void,\n        outSeqs: *mut ZSTD_Sequence,\n        outSeqsCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n        windowSize: usize,\n    ) -> usize,\n>;\nextern \"C\" {\n    #[doc = \" ZSTD_registerSequenceProducer() :\\n Instruct zstd to use a block-level external sequence producer function.\\n\\n The sequenceProducerState must be initialized by the caller, and the caller is\\n responsible for managing its lifetime. This parameter is sticky across\\n compressions. 
It will remain set until the user explicitly resets compression\\n parameters.\\n\\n Sequence producer registration is considered to be an \\\"advanced parameter\\\",\\n part of the \\\"advanced API\\\". This means it will only have an effect on compression\\n APIs which respect advanced parameters, such as compress2() and compressStream2().\\n Older compression APIs such as compressCCtx(), which predate the introduction of\\n \\\"advanced parameters\\\", will ignore any external sequence producer setting.\\n\\n The sequence producer can be \\\"cleared\\\" by registering a NULL function pointer. This\\n removes all limitations described above in the \\\"LIMITATIONS\\\" section of the API docs.\\n\\n The user is strongly encouraged to read the full API documentation (above) before\\n calling this function.\"]\n    pub fn ZSTD_registerSequenceProducer(\n        cctx: *mut ZSTD_CCtx,\n        sequenceProducerState: *mut ::core::ffi::c_void,\n        sequenceProducer: ZSTD_sequenceProducer_F,\n    );\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_registerSequenceProducer() :\\n Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params.\\n This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(),\\n which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx().\\n\\n If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx()\\n is required, then this function is for you. 
Otherwise, you probably don't need it.\\n\\n See tests/zstreamtest.c for example usage.\"]\n    pub fn ZSTD_CCtxParams_registerSequenceProducer(\n        params: *mut ZSTD_CCtx_params,\n        sequenceProducerState: *mut ::core::ffi::c_void,\n        sequenceProducer: ZSTD_sequenceProducer_F,\n    );\n}\nextern \"C\" {\n    #[doc = \"Buffer-less streaming compression (synchronous mode)\\n\\nA ZSTD_CCtx object is required to track streaming operations.\\nUse ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.\\nZSTD_CCtx object can be reused multiple times within successive compression operations.\\n\\nStart by initializing a context.\\nUse ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.\\n\\nThen, consume your input using ZSTD_compressContinue().\\nThere are some important considerations to keep in mind when using this advanced function :\\n- ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.\\n- Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.\\n- Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.\\nWorst case evaluation is provided by ZSTD_compressBound().\\nZSTD_compressContinue() doesn't guarantee recover after a failed compression.\\n- ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).\\nIt remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks)\\n- ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.\\nIn which case, it will \\\"discard\\\" the relevant memory section from its history.\\n\\nFinish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.\\nIt's possible to use srcSize==0, in which case, it will write a final empty block to end the 
frame.\\nWithout last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.\\n\\n`ZSTD_CCtx` object can be reused (ZSTD_compressBegin()) to compress again.\"]\n    pub fn ZSTD_compressBegin(\n        cctx: *mut ZSTD_CCtx,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBegin_usingDict(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBegin_usingCDict(\n        cctx: *mut ZSTD_CCtx,\n        cdict: *const ZSTD_CDict,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_copyCCtx(\n        cctx: *mut ZSTD_CCtx,\n        preparedCCtx: *const ZSTD_CCtx,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressContinue(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressEnd(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBegin_advanced(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        params: ZSTD_parameters,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBegin_usingCDict_advanced(\n        cctx: *mut ZSTD_CCtx,\n        cdict: *const ZSTD_CDict,\n        fParams: ZSTD_frameParameters,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \"Buffer-less streaming decompression (synchronous mode)\\n\\nA ZSTD_DCtx object is required to track streaming 
operations.\\nUse ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.\\nA ZSTD_DCtx object can be reused multiple times.\\n\\nFirst typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().\\nFrame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.\\nData fragment must be large enough to ensure successful decoding.\\n`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.\\nresult  : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.\\n>0 : `srcSize` is too small, please provide at least result bytes on next attempt.\\nerrorCode, which can be tested using ZSTD_isError().\\n\\nIt fills a ZSTD_FrameHeader structure with important information to correctly decode the frame,\\nsuch as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).\\nNote that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.\\nAs a consequence, check that values remain within valid application range.\\nFor example, do not allocate memory blindly, check that `windowSize` is within expectation.\\nEach application can set its own limits, depending on local restrictions.\\nFor extended interoperability, it is recommended to support `windowSize` of at least 8 MB.\\n\\nZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.\\nZSTD_decompressContinue() is very sensitive to contiguity,\\nif 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,\\nor that previous contiguous segment is large enough to properly handle maximum back-reference distance.\\nThere are multiple ways to guarantee this condition.\\n\\nThe most memory efficient way is to use a round buffer of sufficient size.\\nSufficient size is determined by invoking ZSTD_decodingBufferSize_min(),\\nwhich can return an 
error code if required value is too large for current system (in 32-bits mode).\\nIn a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,\\nup to the moment there is not enough room left in the buffer to guarantee decoding another full block,\\nwhich maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.\\nAt which point, decoding can resume from the beginning of the buffer.\\nNote that already decoded data stored in the buffer should be flushed before being overwritten.\\n\\nThere are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.\\n\\nFinally, if you control the compression process, you can also ignore all buffer size rules,\\nas long as the encoder and decoder progress in \\\"lock-step\\\",\\naka use exactly the same buffer sizes, break contiguity at the same place, etc.\\n\\nOnce buffers are setup, start decompression, with ZSTD_decompressBegin().\\nIf decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().\\n\\nThen use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.\\nZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().\\nZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.\\n\\nresult of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).\\nIt can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.\\nIt can also be an error code, which can be tested with ZSTD_isError().\\n\\nA frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.\\nContext can then be reset to start a new decompression.\\n\\nNote : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().\\nThis information is not required to properly decode a 
frame.\\n\\n== Special case : skippable frames ==\\n\\nSkippable frames allow integration of user-defined data into a flow of concatenated frames.\\nSkippable frames will be ignored (skipped) by decompressor.\\nThe format of skippable frames is as follows :\\na) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F\\nb) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits\\nc) Frame Content - any content (User Data) of length equal to Frame Size\\nFor skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.\\nFor skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.\"]\n    pub fn ZSTD_decodingBufferSize_min(\n        windowSize: ::core::ffi::c_ulonglong,\n        frameContentSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_decompressBegin(dctx: *mut ZSTD_DCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_decompressBegin_usingDict(\n        dctx: *mut ZSTD_DCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_decompressBegin_usingDDict(\n        dctx: *mut ZSTD_DCtx,\n        ddict: *const ZSTD_DDict,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_nextSrcSizeToDecompress(dctx: *mut ZSTD_DCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_decompressContinue(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_copyDCtx(dctx: *mut ZSTD_DCtx, preparedDCtx: *const ZSTD_DCtx);\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_nextInputType_e {\n    ZSTDnit_frameHeader = 0,\n    ZSTDnit_blockHeader = 1,\n    ZSTDnit_block = 2,\n    ZSTDnit_lastBlock = 3,\n    ZSTDnit_checksum = 4,\n    ZSTDnit_skippableFrame = 5,\n}\nextern \"C\" {\n    pub fn 
ZSTD_nextInputType(dctx: *mut ZSTD_DCtx) -> ZSTD_nextInputType_e;\n}\nextern \"C\" {\n    #[doc = \"This API is deprecated in favor of the regular compression API.\\nYou can get the frame header down to 2 bytes by setting:\\n- ZSTD_c_format = ZSTD_f_zstd1_magicless\\n- ZSTD_c_contentSizeFlag = 0\\n- ZSTD_c_checksumFlag = 0\\n- ZSTD_c_dictIDFlag = 0\\n\\nThis API is not as well tested as our normal API, so we recommend not using it.\\nWe will be removing it in a future version. If the normal API doesn't provide\\nthe functionality you need, please open a GitHub issue.\\n\\nBlock functions produce and decode raw zstd blocks, without frame metadata.\\nFrame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).\\nBut users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.\\n\\nA few rules to respect :\\n- Compressing and decompressing require a context structure\\n+ Use ZSTD_createCCtx() and ZSTD_createDCtx()\\n- It is necessary to init context before starting\\n+ compression : any ZSTD_compressBegin*() variant, including with dictionary\\n+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary\\n- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB\\n+ If input is larger than a block size, it's necessary to split input data into multiple blocks\\n+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.\\nFrame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.\\n- When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !\\n===> In which case, nothing is produced into `dst` !\\n+ User __must__ test for such outcome and deal directly with uncompressed data\\n+ A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.\\nDoing so would mess up with statistics 
history, leading to potential data corruption.\\n+ ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!\\n+ In case of multiple successive blocks, should some of them be uncompressed,\\ndecoder must be informed of their existence in order to follow proper history.\\nUse ZSTD_insertBlock() for such a case.\"]\n    pub fn ZSTD_getBlockSize(cctx: *const ZSTD_CCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBlock(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_decompressBlock(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_insertBlock(\n        dctx: *mut ZSTD_DCtx,\n        blockStart: *const ::core::ffi::c_void,\n        blockSize: usize,\n    ) -> usize;\n}\n"
  },
  {
    "path": "zstd-safe/zstd-sys/src/bindings_zstd_seekable.rs",
    "content": "/*\nThis file is auto-generated from the public API of the zstd library.\nIt is released under the same BSD license.\n\nBSD License\n\nFor Zstandard software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n * Neither the name Facebook, nor Meta, nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n/* automatically generated by rust-bindgen 0.71.1 */\n\npub const ZSTD_seekTableFooterSize: u32 = 9;\npub const ZSTD_SEEKABLE_MAGICNUMBER: u32 = 2408770225;\npub const ZSTD_SEEKABLE_MAXFRAMES: u32 = 134217728;\npub const ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE: u32 = 1073741824;\npub const ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE: i32 = -2;\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_seekable_CStream_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_seekable_CStream = ZSTD_seekable_CStream_s;\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_seekable_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_seekable = ZSTD_seekable_s;\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_seekTable_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_seekTable = ZSTD_seekTable_s;\nextern \"C\" {\n    pub fn ZSTD_seekable_createCStream() -> *mut ZSTD_seekable_CStream;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_freeCStream(zcs: *mut ZSTD_seekable_CStream)\n        -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_initCStream(\n        zcs: *mut ZSTD_seekable_CStream,\n        compressionLevel: ::core::ffi::c_int,\n        checksumFlag: ::core::ffi::c_int,\n        maxFrameSize: ::core::ffi::c_uint,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_compressStream(\n        zcs: *mut ZSTD_seekable_CStream,\n        output: *mut ZSTD_outBuffer,\n        input: *mut ZSTD_inBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn 
ZSTD_seekable_endFrame(\n        zcs: *mut ZSTD_seekable_CStream,\n        output: *mut ZSTD_outBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_endStream(\n        zcs: *mut ZSTD_seekable_CStream,\n        output: *mut ZSTD_outBuffer,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_frameLog_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_frameLog = ZSTD_frameLog_s;\nextern \"C\" {\n    pub fn ZSTD_seekable_createFrameLog(\n        checksumFlag: ::core::ffi::c_int,\n    ) -> *mut ZSTD_frameLog;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_freeFrameLog(fl: *mut ZSTD_frameLog) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_logFrame(\n        fl: *mut ZSTD_frameLog,\n        compressedSize: ::core::ffi::c_uint,\n        decompressedSize: ::core::ffi::c_uint,\n        checksum: ::core::ffi::c_uint,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_writeSeekTable(\n        fl: *mut ZSTD_frameLog,\n        output: *mut ZSTD_outBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_create() -> *mut ZSTD_seekable;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_free(zs: *mut ZSTD_seekable) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_initBuff(\n        zs: *mut ZSTD_seekable,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_decompress(\n        zs: *mut ZSTD_seekable,\n        dst: *mut ::core::ffi::c_void,\n        dstSize: usize,\n        offset: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_decompressFrame(\n        zs: *mut ZSTD_seekable,\n        dst: *mut ::core::ffi::c_void,\n        dstSize: usize,\n        frameIndex: ::core::ffi::c_uint,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_getNumFrames(\n        zs: *const ZSTD_seekable,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_getFrameCompressedOffset(\n      
  zs: *const ZSTD_seekable,\n        frameIndex: ::core::ffi::c_uint,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_getFrameDecompressedOffset(\n        zs: *const ZSTD_seekable,\n        frameIndex: ::core::ffi::c_uint,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_getFrameCompressedSize(\n        zs: *const ZSTD_seekable,\n        frameIndex: ::core::ffi::c_uint,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_getFrameDecompressedSize(\n        zs: *const ZSTD_seekable,\n        frameIndex: ::core::ffi::c_uint,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_offsetToFrameIndex(\n        zs: *const ZSTD_seekable,\n        offset: ::core::ffi::c_ulonglong,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZSTD_seekTable_create_fromSeekable(\n        zs: *const ZSTD_seekable,\n    ) -> *mut ZSTD_seekTable;\n}\nextern \"C\" {\n    pub fn ZSTD_seekTable_free(st: *mut ZSTD_seekTable) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekTable_getNumFrames(\n        st: *const ZSTD_seekTable,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZSTD_seekTable_getFrameCompressedOffset(\n        st: *const ZSTD_seekTable,\n        frameIndex: ::core::ffi::c_uint,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    pub fn ZSTD_seekTable_getFrameDecompressedOffset(\n        st: *const ZSTD_seekTable,\n        frameIndex: ::core::ffi::c_uint,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    pub fn ZSTD_seekTable_getFrameCompressedSize(\n        st: *const ZSTD_seekTable,\n        frameIndex: ::core::ffi::c_uint,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekTable_getFrameDecompressedSize(\n        st: *const ZSTD_seekTable,\n        frameIndex: ::core::ffi::c_uint,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_seekTable_offsetToFrameIndex(\n        st: *const ZSTD_seekTable,\n        offset: ::core::ffi::c_ulonglong,\n    ) 
-> ::core::ffi::c_uint;\n}\npub type ZSTD_seekable_read = ::core::option::Option<\n    unsafe extern \"C\" fn(\n        opaque: *mut ::core::ffi::c_void,\n        buffer: *mut ::core::ffi::c_void,\n        n: usize,\n    ) -> ::core::ffi::c_int,\n>;\npub type ZSTD_seekable_seek = ::core::option::Option<\n    unsafe extern \"C\" fn(\n        opaque: *mut ::core::ffi::c_void,\n        offset: ::core::ffi::c_longlong,\n        origin: ::core::ffi::c_int,\n    ) -> ::core::ffi::c_int,\n>;\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_seekable_customFile {\n    pub opaque: *mut ::core::ffi::c_void,\n    pub read: ZSTD_seekable_read,\n    pub seek: ZSTD_seekable_seek,\n}\nextern \"C\" {\n    pub fn ZSTD_seekable_initAdvanced(\n        zs: *mut ZSTD_seekable,\n        src: ZSTD_seekable_customFile,\n    ) -> usize;\n}\n"
  },
  {
    "path": "zstd-safe/zstd-sys/src/bindings_zstd_std_experimental.rs",
    "content": "/*\nThis file is auto-generated from the public API of the zstd library.\nIt is released under the same BSD license.\n\nBSD License\n\nFor Zstandard software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n * Neither the name Facebook, nor Meta, nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n/* automatically generated by rust-bindgen 0.66.1 */\n\npub const ZSTD_VERSION_MAJOR: u32 = 1;\npub const ZSTD_VERSION_MINOR: u32 = 5;\npub const ZSTD_VERSION_RELEASE: u32 = 5;\npub const ZSTD_VERSION_NUMBER: u32 = 10505;\npub const ZSTD_CLEVEL_DEFAULT: u32 = 3;\npub const ZSTD_MAGICNUMBER: u32 = 4247762216;\npub const ZSTD_MAGIC_DICTIONARY: u32 = 3962610743;\npub const ZSTD_MAGIC_SKIPPABLE_START: u32 = 407710288;\npub const ZSTD_MAGIC_SKIPPABLE_MASK: u32 = 4294967280;\npub const ZSTD_BLOCKSIZELOG_MAX: u32 = 17;\npub const ZSTD_BLOCKSIZE_MAX: u32 = 131072;\npub const ZSTD_CONTENTSIZE_UNKNOWN: i32 = -1;\npub const ZSTD_CONTENTSIZE_ERROR: i32 = -2;\npub const ZSTD_FRAMEHEADERSIZE_MAX: u32 = 18;\npub const ZSTD_SKIPPABLEHEADERSIZE: u32 = 8;\npub const ZSTD_WINDOWLOG_MAX_32: u32 = 30;\npub const ZSTD_WINDOWLOG_MAX_64: u32 = 31;\npub const ZSTD_WINDOWLOG_MIN: u32 = 10;\npub const ZSTD_HASHLOG_MIN: u32 = 6;\npub const ZSTD_CHAINLOG_MAX_32: u32 = 29;\npub const ZSTD_CHAINLOG_MAX_64: u32 = 30;\npub const ZSTD_CHAINLOG_MIN: u32 = 6;\npub const ZSTD_SEARCHLOG_MIN: u32 = 1;\npub const ZSTD_MINMATCH_MAX: u32 = 7;\npub const ZSTD_MINMATCH_MIN: u32 = 3;\npub const ZSTD_TARGETLENGTH_MAX: u32 = 131072;\npub const ZSTD_TARGETLENGTH_MIN: u32 = 0;\npub const ZSTD_BLOCKSIZE_MAX_MIN: u32 = 1024;\npub const ZSTD_OVERLAPLOG_MIN: u32 = 0;\npub const ZSTD_OVERLAPLOG_MAX: u32 = 9;\npub const ZSTD_WINDOWLOG_LIMIT_DEFAULT: u32 = 27;\npub const ZSTD_LDM_HASHLOG_MIN: u32 = 
6;\npub const ZSTD_LDM_MINMATCH_MIN: u32 = 4;\npub const ZSTD_LDM_MINMATCH_MAX: u32 = 4096;\npub const ZSTD_LDM_BUCKETSIZELOG_MIN: u32 = 1;\npub const ZSTD_LDM_BUCKETSIZELOG_MAX: u32 = 8;\npub const ZSTD_LDM_HASHRATELOG_MIN: u32 = 0;\npub const ZSTD_TARGETCBLOCKSIZE_MIN: u32 = 64;\npub const ZSTD_TARGETCBLOCKSIZE_MAX: u32 = 131072;\npub const ZSTD_SRCSIZEHINT_MIN: u32 = 0;\nextern \"C\" {\n    #[doc = \" ZSTD_versionNumber() :\\n  Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE).\"]\n    pub fn ZSTD_versionNumber() -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_versionString() :\\n  Return runtime library version, like \\\"1.4.5\\\". Requires v1.3.0+.\"]\n    pub fn ZSTD_versionString() -> *const ::core::ffi::c_char;\n}\nextern \"C\" {\n    #[doc = \"  Simple API\\n/\\n/*! ZSTD_compress() :\\n  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.\\n  NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\\n        enough space to successfully compress the data.\\n  @return : compressed size written into `dst` (<= `dstCapacity),\\n            or an error code if it fails (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_compress(\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompress() :\\n  `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.\\n  `dstCapacity` is an upper bound of originalSize to regenerate.\\n  If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.\\n  @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),\\n            or an errorCode if it fails (which can be tested using ZSTD_isError()).\"]\n    pub fn 
ZSTD_decompress(\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        compressedSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_getFrameContentSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDecompressedSize() :\\n  NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().\\n  Both functions work the same way, but ZSTD_getDecompressedSize() blends\\n  \\\"empty\\\", \\\"unknown\\\" and \\\"error\\\" results to the same return value (0),\\n  while ZSTD_getFrameContentSize() gives them separate return values.\\n @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise.\"]\n    pub fn ZSTD_getDecompressedSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_findFrameCompressedSize() : Requires v1.4.0+\\n `src` should point to the start of a ZSTD frame or skippable frame.\\n `srcSize` must be >= first frame size\\n @return : the compressed size of the first frame starting at `src`,\\n           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,\\n        or an error code if input is invalid\"]\n    pub fn ZSTD_findFrameCompressedSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBound(srcSize: usize) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_isError(code: usize) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    pub fn ZSTD_getErrorName(code: usize) -> *const ::core::ffi::c_char;\n}\nextern \"C\" {\n    pub fn ZSTD_minCLevel() -> ::core::ffi::c_int;\n}\nextern \"C\" {\n    pub fn ZSTD_maxCLevel() -> ::core::ffi::c_int;\n}\nextern \"C\" {\n    pub fn ZSTD_defaultCLevel() -> ::core::ffi::c_int;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub 
struct ZSTD_CCtx_s {\n    _unused: [u8; 0],\n}\n#[doc = \"  Explicit context\"]\npub type ZSTD_CCtx = ZSTD_CCtx_s;\nextern \"C\" {\n    pub fn ZSTD_createCCtx() -> *mut ZSTD_CCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_freeCCtx(cctx: *mut ZSTD_CCtx) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressCCtx() :\\n  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\\n  Important : in order to behave similarly to `ZSTD_compress()`,\\n  this function compresses at requested compression level,\\n  __ignoring any other parameter__ .\\n  If any advanced parameter was set using the advanced API,\\n  they will all be reset. Only `compressionLevel` remains.\"]\n    pub fn ZSTD_compressCCtx(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_DCtx_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_DCtx = ZSTD_DCtx_s;\nextern \"C\" {\n    pub fn ZSTD_createDCtx() -> *mut ZSTD_DCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_freeDCtx(dctx: *mut ZSTD_DCtx) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressDCtx() :\\n  Same as ZSTD_decompress(),\\n  requires an allocated ZSTD_DCtx.\\n  Compatible with sticky parameters.\"]\n    pub fn ZSTD_decompressDCtx(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\n#[repr(u32)]\n#[doc = \"  Advanced compression API (Requires v1.4.0+)\"]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_strategy {\n    ZSTD_fast = 1,\n    ZSTD_dfast = 2,\n    ZSTD_greedy = 3,\n    ZSTD_lazy = 4,\n    ZSTD_lazy2 = 5,\n    ZSTD_btlazy2 = 6,\n    ZSTD_btopt = 7,\n    ZSTD_btultra = 8,\n    ZSTD_btultra2 = 9,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, 
PartialEq, Eq)]\npub enum ZSTD_cParameter {\n    ZSTD_c_compressionLevel = 100,\n    ZSTD_c_windowLog = 101,\n    ZSTD_c_hashLog = 102,\n    ZSTD_c_chainLog = 103,\n    ZSTD_c_searchLog = 104,\n    ZSTD_c_minMatch = 105,\n    ZSTD_c_targetLength = 106,\n    ZSTD_c_strategy = 107,\n    ZSTD_c_enableLongDistanceMatching = 160,\n    ZSTD_c_ldmHashLog = 161,\n    ZSTD_c_ldmMinMatch = 162,\n    ZSTD_c_ldmBucketSizeLog = 163,\n    ZSTD_c_ldmHashRateLog = 164,\n    ZSTD_c_contentSizeFlag = 200,\n    ZSTD_c_checksumFlag = 201,\n    ZSTD_c_dictIDFlag = 202,\n    ZSTD_c_nbWorkers = 400,\n    ZSTD_c_jobSize = 401,\n    ZSTD_c_overlapLog = 402,\n    ZSTD_c_experimentalParam1 = 500,\n    ZSTD_c_experimentalParam2 = 10,\n    ZSTD_c_experimentalParam3 = 1000,\n    ZSTD_c_experimentalParam4 = 1001,\n    ZSTD_c_experimentalParam5 = 1002,\n    ZSTD_c_experimentalParam6 = 1003,\n    ZSTD_c_experimentalParam7 = 1004,\n    ZSTD_c_experimentalParam8 = 1005,\n    ZSTD_c_experimentalParam9 = 1006,\n    ZSTD_c_experimentalParam10 = 1007,\n    ZSTD_c_experimentalParam11 = 1008,\n    ZSTD_c_experimentalParam12 = 1009,\n    ZSTD_c_experimentalParam13 = 1010,\n    ZSTD_c_experimentalParam14 = 1011,\n    ZSTD_c_experimentalParam15 = 1012,\n    ZSTD_c_experimentalParam16 = 1013,\n    ZSTD_c_experimentalParam17 = 1014,\n    ZSTD_c_experimentalParam18 = 1015,\n    ZSTD_c_experimentalParam19 = 1016,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_bounds {\n    pub error: usize,\n    pub lowerBound: ::core::ffi::c_int,\n    pub upperBound: ::core::ffi::c_int,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_cParam_getBounds() :\\n  All parameters must belong to an interval with lower and upper bounds,\\n  otherwise they will either trigger an error or be automatically clamped.\\n @return : a structure, ZSTD_bounds, which contains\\n         - an error status field, which must be tested using ZSTD_isError()\\n         - lower and upper bounds, both inclusive\"]\n    pub fn 
ZSTD_cParam_getBounds(cParam: ZSTD_cParameter) -> ZSTD_bounds;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setParameter() :\\n  Set one compression parameter, selected by enum ZSTD_cParameter.\\n  All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().\\n  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\\n  Setting a parameter is generally only possible during frame initialization (before starting compression).\\n  Exception : when using multi-threading mode (nbWorkers >= 1),\\n              the following parameters can be updated _during_ compression (within same frame):\\n              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.\\n              new parameters will be active for next job only (after a flush()).\\n @return : an error code (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_CCtx_setParameter(\n        cctx: *mut ZSTD_CCtx,\n        param: ZSTD_cParameter,\n        value: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setPledgedSrcSize() :\\n  Total input data size to be compressed as a single frame.\\n  Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag.\\n  This value will also be controlled at end of frame, and trigger an error if not respected.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.\\n           In order to mean \\\"unknown content size\\\", pass constant ZSTD_CONTENTSIZE_UNKNOWN.\\n           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.\\n  Note 2 : pledgedSrcSize is only valid once, for the next frame.\\n           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.\\n  Note 3 : Whenever all input data is provided and consumed in a single round,\\n           for example with 
ZSTD_compress2(),\\n           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),\\n           this value is automatically overridden by srcSize instead.\"]\n    pub fn ZSTD_CCtx_setPledgedSrcSize(\n        cctx: *mut ZSTD_CCtx,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_ResetDirective {\n    ZSTD_reset_session_only = 1,\n    ZSTD_reset_parameters = 2,\n    ZSTD_reset_session_and_parameters = 3,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_reset() :\\n  There are 2 different things that can be reset, independently or jointly :\\n  - The session : will stop compressing current frame, and make CCtx ready to start a new one.\\n                  Useful after an error, or to interrupt any ongoing compression.\\n                  Any internal data not yet flushed is cancelled.\\n                  Compression parameters and dictionary remain unchanged.\\n                  They will be used to compress next frame.\\n                  Resetting session never fails.\\n  - The parameters : changes all parameters back to \\\"default\\\".\\n                  This also removes any reference to any dictionary or external sequence producer.\\n                  Parameters can only be changed between 2 sessions (i.e. 
no compression is currently ongoing)\\n                  otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())\\n  - Both : similar to resetting the session, followed by resetting parameters.\"]\n    pub fn ZSTD_CCtx_reset(\n        cctx: *mut ZSTD_CCtx,\n        reset: ZSTD_ResetDirective,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compress2() :\\n  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\\n  ZSTD_compress2() always starts a new frame.\\n  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\\n  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\\n  - The function is always blocking, returns when compression is completed.\\n  NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\\n        enough space to successfully compress the data, though it is possible it fails for other reasons.\\n @return : compressed size written into `dst` (<= `dstCapacity),\\n           or an error code if it fails (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_compress2(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\n#[repr(u32)]\n#[doc = \"  Advanced decompression API (Requires v1.4.0+)\"]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_dParameter {\n    ZSTD_d_windowLogMax = 100,\n    ZSTD_d_experimentalParam1 = 1000,\n    ZSTD_d_experimentalParam2 = 1001,\n    ZSTD_d_experimentalParam3 = 1002,\n    ZSTD_d_experimentalParam4 = 1003,\n    ZSTD_d_experimentalParam5 = 1004,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_dParam_getBounds() :\\n  All parameters must belong to an interval with lower and upper bounds,\\n  otherwise they will either trigger an error or be 
automatically clamped.\\n @return : a structure, ZSTD_bounds, which contains\\n         - an error status field, which must be tested using ZSTD_isError()\\n         - both lower and upper bounds, inclusive\"]\n    pub fn ZSTD_dParam_getBounds(dParam: ZSTD_dParameter) -> ZSTD_bounds;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_setParameter() :\\n  Set one compression parameter, selected by enum ZSTD_dParameter.\\n  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().\\n  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\\n  Setting a parameter is only possible during frame initialization (before starting decompression).\\n @return : 0, or an error code (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_DCtx_setParameter(\n        dctx: *mut ZSTD_DCtx,\n        param: ZSTD_dParameter,\n        value: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_reset() :\\n  Return a DCtx to clean state.\\n  Session and parameters can be reset jointly or separately.\\n  Parameters can only be reset when no active frame is being decompressed.\\n @return : 0, or an error code, which can be tested with ZSTD_isError()\"]\n    pub fn ZSTD_DCtx_reset(\n        dctx: *mut ZSTD_DCtx,\n        reset: ZSTD_ResetDirective,\n    ) -> usize;\n}\n#[doc = \"  Streaming\"]\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_inBuffer_s {\n    #[doc = \"< start of input buffer\"]\n    pub src: *const ::core::ffi::c_void,\n    #[doc = \"< size of input buffer\"]\n    pub size: usize,\n    #[doc = \"< position where reading stopped. Will be updated. 
Necessarily 0 <= pos <= size\"]\n    pub pos: usize,\n}\n#[doc = \"  Streaming\"]\npub type ZSTD_inBuffer = ZSTD_inBuffer_s;\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_outBuffer_s {\n    #[doc = \"< start of output buffer\"]\n    pub dst: *mut ::core::ffi::c_void,\n    #[doc = \"< size of output buffer\"]\n    pub size: usize,\n    #[doc = \"< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size\"]\n    pub pos: usize,\n}\npub type ZSTD_outBuffer = ZSTD_outBuffer_s;\npub type ZSTD_CStream = ZSTD_CCtx;\nextern \"C\" {\n    pub fn ZSTD_createCStream() -> *mut ZSTD_CStream;\n}\nextern \"C\" {\n    pub fn ZSTD_freeCStream(zcs: *mut ZSTD_CStream) -> usize;\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_EndDirective {\n    ZSTD_e_continue = 0,\n    ZSTD_e_flush = 1,\n    ZSTD_e_end = 2,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressStream2() : Requires v1.4.0+\\n  Behaves about the same as ZSTD_compressStream, with additional control on end directive.\\n  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\\n  - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\\n  - output->pos must be <= dstCapacity, input->pos must be <= srcSize\\n  - output->pos and input->pos will be updated. 
They are guaranteed to remain below their respective limit.\\n  - endOp must be a valid directive\\n  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\\n  - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,\\n                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.\\n                                                  The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\\n  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\\n  - @return provides a minimum amount of data remaining to be flushed from internal buffers\\n            or an error code, which can be tested using ZSTD_isError().\\n            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\\n            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.\\n            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\\n  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\\n            only ZSTD_e_end or ZSTD_e_flush operations are allowed.\\n            Before starting a new compression job, or changing compression parameters,\\n            it is required to fully flush internal buffers.\"]\n    pub fn ZSTD_compressStream2(\n        cctx: *mut ZSTD_CCtx,\n        output: *mut ZSTD_outBuffer,\n        input: *mut ZSTD_inBuffer,\n        endOp: ZSTD_EndDirective,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_CStreamInSize() -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_CStreamOutSize() -> 
usize;\n}\nextern \"C\" {\n    #[doc = \" Equivalent to:\\n\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\\n     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\\n\\n Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API\\n to compress with a dictionary.\"]\n    pub fn ZSTD_initCStream(\n        zcs: *mut ZSTD_CStream,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).\\n NOTE: The return value is different. ZSTD_compressStream() returns a hint for\\n the next read size (if non-zero and not an error). ZSTD_compressStream2()\\n returns the minimum nb of bytes left to flush (if non-zero and not an error).\"]\n    pub fn ZSTD_compressStream(\n        zcs: *mut ZSTD_CStream,\n        output: *mut ZSTD_outBuffer,\n        input: *mut ZSTD_inBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush).\"]\n    pub fn ZSTD_flushStream(\n        zcs: *mut ZSTD_CStream,\n        output: *mut ZSTD_outBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end).\"]\n    pub fn ZSTD_endStream(\n        zcs: *mut ZSTD_CStream,\n        output: *mut ZSTD_outBuffer,\n    ) -> usize;\n}\npub type ZSTD_DStream = ZSTD_DCtx;\nextern \"C\" {\n    pub fn ZSTD_createDStream() -> *mut ZSTD_DStream;\n}\nextern \"C\" {\n    pub fn ZSTD_freeDStream(zds: *mut ZSTD_DStream) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initDStream() :\\n Initialize/reset DStream state for new decompression operation.\\n Call before new decompression operation using same DStream.\\n\\n Note : This function is redundant with the advanced API and equivalent to:\\n     ZSTD_DCtx_reset(zds, 
ZSTD_reset_session_only);\\n     ZSTD_DCtx_refDDict(zds, NULL);\"]\n    pub fn ZSTD_initDStream(zds: *mut ZSTD_DStream) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressStream() :\\n Streaming decompression function.\\n Call repetitively to consume full input updating it as necessary.\\n Function will update both input and output `pos` fields exposing current state via these fields:\\n - `input.pos < input.size`, some input remaining and caller should provide remaining input\\n   on the next call.\\n - `output.pos < output.size`, decoder finished and flushed all remaining buffers.\\n - `output.pos == output.size`, potentially uncflushed data present in the internal buffers,\\n   call ZSTD_decompressStream() again to flush remaining data to output.\\n Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.\\n\\n @return : 0 when a frame is completely decoded and fully flushed,\\n           or an error code, which can be tested using ZSTD_isError(),\\n           or any other value > 0, which means there is some decoding or flushing to do to complete current frame.\"]\n    pub fn ZSTD_decompressStream(\n        zds: *mut ZSTD_DStream,\n        output: *mut ZSTD_outBuffer,\n        input: *mut ZSTD_inBuffer,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_DStreamInSize() -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_DStreamOutSize() -> usize;\n}\nextern \"C\" {\n    #[doc = \"  Simple dictionary API\\n/\\n/*! 
ZSTD_compress_usingDict() :\\n  Compression at an explicit compression level using a Dictionary.\\n  A dictionary can be any arbitrary data segment (also called a prefix),\\n  or a buffer with specified information (see zdict.h).\\n  Note : This function loads the dictionary, resulting in significant startup delay.\\n         It's intended for a dictionary used only once.\\n  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used.\"]\n    pub fn ZSTD_compress_usingDict(\n        ctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompress_usingDict() :\\n  Decompression using a known Dictionary.\\n  Dictionary must be identical to the one used during compression.\\n  Note : This function loads the dictionary, resulting in significant startup delay.\\n         It's intended for a dictionary used only once.\\n  Note : When `dict == NULL || dictSize < 8` no dictionary is used.\"]\n    pub fn ZSTD_decompress_usingDict(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_CDict_s {\n    _unused: [u8; 0],\n}\n#[doc = \"  Bulk processing dictionary API\"]\npub type ZSTD_CDict = ZSTD_CDict_s;\nextern \"C\" {\n    #[doc = \" ZSTD_createCDict() :\\n  When compressing multiple messages or blocks using the same dictionary,\\n  it's recommended to digest the dictionary only once, since it's a costly operation.\\n  ZSTD_createCDict() will create a state from digesting a dictionary.\\n  The resulting state can be used for future 
compression operations with very limited startup cost.\\n  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.\\n @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.\\n  Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.\\n  Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,\\n      in which case the only thing that it transports is the @compressionLevel.\\n      This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,\\n      expecting a ZSTD_CDict parameter with any data, including those without a known dictionary.\"]\n    pub fn ZSTD_createCDict(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> *mut ZSTD_CDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_freeCDict() :\\n  Function frees memory allocated by ZSTD_createCDict().\\n  If a NULL pointer is passed, no operation is performed.\"]\n    pub fn ZSTD_freeCDict(CDict: *mut ZSTD_CDict) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compress_usingCDict() :\\n  Compression using a digested Dictionary.\\n  Recommended when same dictionary is used multiple times.\\n  Note : compression level is _decided at dictionary creation time_,\\n     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)\"]\n    pub fn ZSTD_compress_usingCDict(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        cdict: *const ZSTD_CDict,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_DDict_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_DDict = ZSTD_DDict_s;\nextern \"C\" {\n    #[doc = \" ZSTD_createDDict() :\\n  Create a digested dictionary, ready to start decompression 
operation without startup delay.\\n  dictBuffer can be released after DDict creation, as its content is copied inside DDict.\"]\n    pub fn ZSTD_createDDict(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> *mut ZSTD_DDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_freeDDict() :\\n  Function frees memory allocated with ZSTD_createDDict()\\n  If a NULL pointer is passed, no operation is performed.\"]\n    pub fn ZSTD_freeDDict(ddict: *mut ZSTD_DDict) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompress_usingDDict() :\\n  Decompression using a digested Dictionary.\\n  Recommended when same dictionary is used multiple times.\"]\n    pub fn ZSTD_decompress_usingDDict(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        ddict: *const ZSTD_DDict,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromDict() : Requires v1.4.0+\\n  Provides the dictID stored within dictionary.\\n  if @return == 0, the dictionary is not conformant with Zstandard specification.\\n  It can still be loaded, but as a content-only dictionary.\"]\n    pub fn ZSTD_getDictID_fromDict(\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromCDict() : Requires v1.5.0+\\n  Provides the dictID of the dictionary loaded into `cdict`.\\n  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\\n  Non-conformant dictionaries can still be loaded, but as content-only dictionaries.\"]\n    pub fn ZSTD_getDictID_fromCDict(\n        cdict: *const ZSTD_CDict,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromDDict() : Requires v1.4.0+\\n  Provides the dictID of the dictionary loaded into `ddict`.\\n  If @return == 0, the dictionary is not conformant to Zstandard 
specification, or empty.\\n  Non-conformant dictionaries can still be loaded, but as content-only dictionaries.\"]\n    pub fn ZSTD_getDictID_fromDDict(\n        ddict: *const ZSTD_DDict,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getDictID_fromFrame() : Requires v1.4.0+\\n  Provides the dictID required to decompressed the frame stored within `src`.\\n  If @return == 0, the dictID could not be decoded.\\n  This could for one of the following reasons :\\n  - The frame does not require a dictionary to be decoded (most common case).\\n  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information.\\n    Note : this use case also happens when using a non-conformant dictionary.\\n  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).\\n  - This is not a Zstandard frame.\\n  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code.\"]\n    pub fn ZSTD_getDictID_fromFrame(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_loadDictionary() : Requires v1.4.0+\\n  Create an internal CDict from `dict` buffer.\\n  Decompression will have to use same dictionary.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,\\n           meaning \\\"return to no-dictionary mode\\\".\\n  Note 1 : Dictionary is sticky, it will be used for all future compressed frames,\\n           until parameters are reset, a new dictionary is loaded, or the dictionary\\n           is explicitly invalidated by loading a NULL dictionary.\\n  Note 2 : Loading a dictionary involves building tables.\\n           It's also a CPU consuming operation, with non-negligible 
impact on latency.\\n           Tables are dependent on compression parameters, and for this reason,\\n           compression parameters can no longer be changed after loading a dictionary.\\n  Note 3 :`dict` content will be copied internally.\\n           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.\\n           In such a case, dictionary buffer must outlive its users.\\n  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()\\n           to precisely select how dictionary content must be interpreted.\\n  Note 5 : This method does not benefit from LDM (long distance mode).\\n           If you want to employ LDM on some large dictionary content,\\n           prefer employing ZSTD_CCtx_refPrefix() described below.\"]\n    pub fn ZSTD_CCtx_loadDictionary(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_refCDict() : Requires v1.4.0+\\n  Reference a prepared dictionary, to be used for all future compressed frames.\\n  Note that compression parameters are enforced from within CDict,\\n  and supersede any compression parameter previously set within CCtx.\\n  The parameters ignored are labelled as \\\"superseded-by-cdict\\\" in the ZSTD_cParameter enum docs.\\n  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.\\n  The dictionary will remain valid for future compressed frames using same CCtx.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special : Referencing a NULL CDict means \\\"return to no-dictionary mode\\\".\\n  Note 1 : Currently, only one dictionary can be managed.\\n           Referencing a new dictionary effectively \\\"discards\\\" any previous one.\\n  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx.\"]\n    pub fn ZSTD_CCtx_refCDict(\n        cctx: *mut ZSTD_CCtx,\n        cdict: *const ZSTD_CDict,\n    ) 
-> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_refPrefix() : Requires v1.4.0+\\n  Reference a prefix (single-usage dictionary) for next compressed frame.\\n  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).\\n  Decompression will need same prefix to properly regenerate data.\\n  Compressing with a prefix is similar in outcome as performing a diff and compressing it,\\n  but performs much faster, especially during decompression (compression speed is tunable with compression level).\\n  This method is compatible with LDM (long distance mode).\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary\\n  Note 1 : Prefix buffer is referenced. It **must** outlive compression.\\n           Its content must remain unmodified during compression.\\n  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,\\n           ensure that the window size is large enough to contain the entire source.\\n           See ZSTD_c_windowLog.\\n  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.\\n           It's a CPU consuming operation, with non-negligible impact on latency.\\n           If there is a need to use the same prefix multiple times, consider loadDictionary instead.\\n  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).\\n           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation.\"]\n    pub fn ZSTD_CCtx_refPrefix(\n        cctx: *mut ZSTD_CCtx,\n        prefix: *const ::core::ffi::c_void,\n        prefixSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_loadDictionary() : Requires v1.4.0+\\n  Create an internal DDict from dict buffer, to be used to decompress all future frames.\\n  The dictionary remains valid for all future frames, until 
explicitly invalidated, or\\n  a new dictionary is loaded.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,\\n            meaning \\\"return to no-dictionary mode\\\".\\n  Note 1 : Loading a dictionary involves building tables,\\n           which has a non-negligible impact on CPU usage and latency.\\n           It's recommended to \\\"load once, use many times\\\", to amortize the cost\\n  Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.\\n           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.\\n  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of\\n           how dictionary content is loaded and interpreted.\"]\n    pub fn ZSTD_DCtx_loadDictionary(\n        dctx: *mut ZSTD_DCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_refDDict() : Requires v1.4.0+\\n  Reference a prepared dictionary, to be used to decompress next frames.\\n  The dictionary remains active for decompression of future frames using same DCtx.\\n\\n  If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function\\n  will store the DDict references in a table, and the DDict used for decompression\\n  will be determined at decompression time, as per the dict ID in the frame.\\n  The memory for the table is allocated on the first call to refDDict, and can be\\n  freed with ZSTD_freeDCtx().\\n\\n  If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary\\n  will be managed, and referencing a dictionary effectively \\\"discards\\\" any previous one.\\n\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Special: referencing a NULL DDict means \\\"return to no-dictionary mode\\\".\\n  Note 2 : DDict is just referenced, its lifetime must 
outlive its usage from DCtx.\"]\n    pub fn ZSTD_DCtx_refDDict(\n        dctx: *mut ZSTD_DCtx,\n        ddict: *const ZSTD_DDict,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_refPrefix() : Requires v1.4.0+\\n  Reference a prefix (single-usage dictionary) to decompress next frame.\\n  This is the reverse operation of ZSTD_CCtx_refPrefix(),\\n  and must use the same prefix as the one used during compression.\\n  Prefix is **only used once**. Reference is discarded at end of frame.\\n  End of frame is reached when ZSTD_decompressStream() returns 0.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\\n  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary\\n  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.\\n           Prefix buffer must remain unmodified up to the end of frame,\\n           reached when ZSTD_decompressStream() returns 0.\\n  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).\\n           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)\\n  Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.\\n           A full dictionary is more costly, as it requires building tables.\"]\n    pub fn ZSTD_DCtx_refPrefix(\n        dctx: *mut ZSTD_DCtx,\n        prefix: *const ::core::ffi::c_void,\n        prefixSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_sizeof_*() : Requires v1.4.0+\\n  These functions give the _current_ memory usage of selected object.\\n  Note that object memory usage can evolve (increase or decrease) over time.\"]\n    pub fn ZSTD_sizeof_CCtx(cctx: *const ZSTD_CCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_DCtx(dctx: *const ZSTD_DCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_CStream(zcs: *const ZSTD_CStream) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_DStream(zds: *const ZSTD_DStream) -> 
usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_CDict(cdict: *const ZSTD_CDict) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_sizeof_DDict(ddict: *const ZSTD_DDict) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_CCtx_params_s {\n    _unused: [u8; 0],\n}\npub type ZSTD_CCtx_params = ZSTD_CCtx_params_s;\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_Sequence {\n    pub offset: ::core::ffi::c_uint,\n    pub litLength: ::core::ffi::c_uint,\n    pub matchLength: ::core::ffi::c_uint,\n    pub rep: ::core::ffi::c_uint,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_compressionParameters {\n    #[doc = \"< largest match distance : larger == more compression, more memory needed during decompression\"]\n    pub windowLog: ::core::ffi::c_uint,\n    #[doc = \"< fully searched segment : larger == more compression, slower, more memory (useless for fast)\"]\n    pub chainLog: ::core::ffi::c_uint,\n    #[doc = \"< dispatch table : larger == faster, more memory\"]\n    pub hashLog: ::core::ffi::c_uint,\n    #[doc = \"< nb of searches : larger == more compression, slower\"]\n    pub searchLog: ::core::ffi::c_uint,\n    #[doc = \"< match length searched : larger == faster decompression, sometimes less compression\"]\n    pub minMatch: ::core::ffi::c_uint,\n    #[doc = \"< acceptable match size for optimal parser (only) : larger == more compression, slower\"]\n    pub targetLength: ::core::ffi::c_uint,\n    #[doc = \"< see ZSTD_strategy definition above\"]\n    pub strategy: ZSTD_strategy,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_frameParameters {\n    #[doc = \"< 1: content size will be in frame header (when known)\"]\n    pub contentSizeFlag: ::core::ffi::c_int,\n    #[doc = \"< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection\"]\n    pub checksumFlag: ::core::ffi::c_int,\n    #[doc = \"< 1: no dictID will be saved into frame header (dictID is only useful for 
dictionary compression)\"]\n    pub noDictIDFlag: ::core::ffi::c_int,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_parameters {\n    pub cParams: ZSTD_compressionParameters,\n    pub fParams: ZSTD_frameParameters,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_dictContentType_e {\n    ZSTD_dct_auto = 0,\n    ZSTD_dct_rawContent = 1,\n    ZSTD_dct_fullDict = 2,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_dictLoadMethod_e {\n    #[doc = \"< Copy dictionary content internally\"]\n    ZSTD_dlm_byCopy = 0,\n    #[doc = \"< Reference dictionary content -- the dictionary buffer must outlive its users.\"]\n    ZSTD_dlm_byRef = 1,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_format_e {\n    ZSTD_f_zstd1 = 0,\n    ZSTD_f_zstd1_magicless = 1,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_forceIgnoreChecksum_e {\n    ZSTD_d_validateChecksum = 0,\n    ZSTD_d_ignoreChecksum = 1,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_refMultipleDDicts_e {\n    ZSTD_rmd_refSingleDDict = 0,\n    ZSTD_rmd_refMultipleDDicts = 1,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_dictAttachPref_e {\n    ZSTD_dictDefaultAttach = 0,\n    ZSTD_dictForceAttach = 1,\n    ZSTD_dictForceCopy = 2,\n    ZSTD_dictForceLoad = 3,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_literalCompressionMode_e {\n    #[doc = \"< Automatically determine the compression mode based on the compression level.\\n   Negative compression levels will be uncompressed, and positive compression\\n   levels will be compressed.\"]\n    ZSTD_lcm_auto = 0,\n    #[doc = \"< Always attempt Huffman compression. 
Uncompressed literals will still be\\n   emitted if Huffman compression is not profitable.\"]\n    ZSTD_lcm_huffman = 1,\n    #[doc = \"< Always emit uncompressed literals.\"]\n    ZSTD_lcm_uncompressed = 2,\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_paramSwitch_e {\n    ZSTD_ps_auto = 0,\n    ZSTD_ps_enable = 1,\n    ZSTD_ps_disable = 2,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_findDecompressedSize() :\\n  `src` should point to the start of a series of ZSTD encoded and/or skippable frames\\n  `srcSize` must be the _exact_ size of this series\\n       (i.e. there should be a frame boundary at `src + srcSize`)\\n  @return : - decompressed size of all data in all successive frames\\n            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN\\n            - if an error occurred: ZSTD_CONTENTSIZE_ERROR\\n\\n   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.\\n            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.\\n            In which case, it's necessary to use streaming mode to decompress data.\\n   note 2 : decompressed size is always present when compression is done with ZSTD_compress()\\n   note 3 : decompressed size can be very large (64-bits value),\\n            potentially larger than what local system can handle as a single memory segment.\\n            In which case, it's necessary to use streaming mode to decompress data.\\n   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.\\n            Always ensure result fits within application's authorized limits.\\n            Each application can set its own limits.\\n   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to\\n            read each contained frame header.  
This is fast as most of the data is skipped,\\n            however it does mean that all frame data must be present and valid.\"]\n    pub fn ZSTD_findDecompressedSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressBound() :\\n  `src` should point to the start of a series of ZSTD encoded and/or skippable frames\\n  `srcSize` must be the _exact_ size of this series\\n       (i.e. there should be a frame boundary at `src + srcSize`)\\n  @return : - upper-bound for the decompressed size of all data in all successive frames\\n            - if an error occurred: ZSTD_CONTENTSIZE_ERROR\\n\\n  note 1  : an error can occur if `src` contains an invalid or incorrectly formatted frame.\\n  note 2  : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.\\n            in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.\\n  note 3  : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:\\n              upper-bound = # blocks * min(128 KB, Window_Size)\"]\n    pub fn ZSTD_decompressBound(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> ::core::ffi::c_ulonglong;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_frameHeaderSize() :\\n  srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.\\n @return : size of the Frame Header,\\n           or an error code (if srcSize is too small)\"]\n    pub fn ZSTD_frameHeaderSize(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_frameType_e {\n    ZSTD_frame = 0,\n    ZSTD_skippableFrame = 1,\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_frameHeader {\n    pub frameContentSize: ::core::ffi::c_ulonglong,\n    pub windowSize: ::core::ffi::c_ulonglong,\n    
pub blockSizeMax: ::core::ffi::c_uint,\n    pub frameType: ZSTD_frameType_e,\n    pub headerSize: ::core::ffi::c_uint,\n    pub dictID: ::core::ffi::c_uint,\n    pub checksumFlag: ::core::ffi::c_uint,\n    pub _reserved1: ::core::ffi::c_uint,\n    pub _reserved2: ::core::ffi::c_uint,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getFrameHeader() :\\n  decode Frame Header, or requires larger `srcSize`.\\n @return : 0, `zfhPtr` is correctly filled,\\n          >0, `srcSize` is too small, value is wanted `srcSize` amount,\\n           or an error code, which can be tested using ZSTD_isError()\"]\n    pub fn ZSTD_getFrameHeader(\n        zfhPtr: *mut ZSTD_frameHeader,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getFrameHeader_advanced() :\\n  same as ZSTD_getFrameHeader(),\\n  with added capability to select a format (like ZSTD_f_zstd1_magicless)\"]\n    pub fn ZSTD_getFrameHeader_advanced(\n        zfhPtr: *mut ZSTD_frameHeader,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        format: ZSTD_format_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressionMargin() :\\n Zstd supports in-place decompression, where the input and output buffers overlap.\\n In this case, the output buffer must be at least (Margin + Output_Size) bytes large,\\n and the input buffer must be at the end of the output buffer.\\n\\n  _______________________ Output Buffer ________________________\\n |                                                              |\\n |                                        ____ Input Buffer ____|\\n |                                       |                      |\\n v                                       v                      v\\n |---------------------------------------|-----------|----------|\\n ^                                                   ^          ^\\n |___________________ Output_Size ___________________|_ Margin _|\\n\\n NOTE: See also 
ZSTD_DECOMPRESSION_MARGIN().\\n NOTE: This applies only to single-pass decompression through ZSTD_decompress() or\\n ZSTD_decompressDCtx().\\n NOTE: This function supports multi-frame input.\\n\\n @param src The compressed frame(s)\\n @param srcSize The size of the compressed frame(s)\\n @returns The decompression margin or an error that can be checked with ZSTD_isError().\"]\n    pub fn ZSTD_decompressionMargin(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_sequenceFormat_e {\n    ZSTD_sf_noBlockDelimiters = 0,\n    ZSTD_sf_explicitBlockDelimiters = 1,\n}\nextern \"C\" {\n    #[doc = \" ZSTD_sequenceBound() :\\n `srcSize` : size of the input buffer\\n  @return : upper-bound for the number of sequences that can be generated\\n            from a buffer of srcSize bytes\\n\\n  note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence).\"]\n    pub fn ZSTD_sequenceBound(srcSize: usize) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_generateSequences() :\\n Generate sequences using ZSTD_compress2(), given a source buffer.\\n\\n Each block will end with a dummy sequence\\n with offset == 0, matchLength == 0, and litLength == length of last literals.\\n litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)\\n simply acts as a block delimiter.\\n\\n @zc can be used to insert custom compression params.\\n This function invokes ZSTD_compress2().\\n\\n The output of this function can be fed into ZSTD_compressSequences() with CCtx\\n setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters\\n @return : number of sequences generated\"]\n    pub fn ZSTD_generateSequences(\n        zc: *mut ZSTD_CCtx,\n        outSeqs: *mut ZSTD_Sequence,\n        outSeqsSize: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" 
ZSTD_mergeBlockDelimiters() :\\n Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals\\n by merging them into the literals of the next sequence.\\n\\n As such, the final generated result has no explicit representation of block boundaries,\\n and the final last literals segment is not represented in the sequences.\\n\\n The output of this function can be fed into ZSTD_compressSequences() with CCtx\\n setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters\\n @return : number of sequences left after merging\"]\n    pub fn ZSTD_mergeBlockDelimiters(\n        sequences: *mut ZSTD_Sequence,\n        seqsSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressSequences() :\\n Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.\\n @src contains the entire input (not just the literals).\\n If @srcSize > sum(sequence.length), the remaining bytes are considered all literals\\n If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)\\n The entire source is compressed into a single frame.\\n\\n The compression behavior changes based on cctx params. In particular:\\n    If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain\\n    no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on\\n    the block size derived from the cctx, and sequences may be split. This is the default setting.\\n\\n    If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain\\n    block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.\\n\\n    If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined\\n    behavior. 
If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for\\n    specifics regarding offset/matchlength requirements) then the function will bail out and return an error.\\n\\n    In addition to the two adjustable experimental params, there are other important cctx params.\\n    - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.\\n    - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.\\n    - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset\\n      is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md\\n\\n Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.\\n Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,\\n         and cannot emit an RLE block that disagrees with the repcode history\\n @return : final compressed size, or a ZSTD error code.\"]\n    pub fn ZSTD_compressSequences(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstSize: usize,\n        inSeqs: *const ZSTD_Sequence,\n        inSeqsSize: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_writeSkippableFrame() :\\n Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.\\n\\n Skippable frames begin with a 4-byte magic number. 
There are 16 possible choices of magic number,\\n ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.\\n As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so\\n the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.\\n\\n Returns an error if destination buffer is not large enough, if the source size is not representable\\n with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).\\n\\n @return : number of bytes written or a ZSTD error.\"]\n    pub fn ZSTD_writeSkippableFrame(\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        magicVariant: ::core::ffi::c_uint,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_readSkippableFrame() :\\n Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.\\n\\n The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,\\n i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START.  
This can be NULL if the caller is not interested\\n in the magicVariant.\\n\\n Returns an error if destination buffer is not large enough, or if the frame is not skippable.\\n\\n @return : number of bytes written or a ZSTD error.\"]\n    pub fn ZSTD_readSkippableFrame(\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        magicVariant: *mut ::core::ffi::c_uint,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_isSkippableFrame() :\\n  Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.\"]\n    pub fn ZSTD_isSkippableFrame(\n        buffer: *const ::core::ffi::c_void,\n        size: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_estimate*() :\\n  These functions make it possible to estimate memory usage\\n  of a future {D,C}Ctx, before its creation.\\n\\n  ZSTD_estimateCCtxSize() will provide a memory budget large enough\\n  for any compression level up to selected one.\\n  Note : Unlike ZSTD_estimateCStreamSize*(), this estimate\\n         does not include space for a window buffer.\\n         Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.\\n  The estimate will assume the input may be arbitrarily large,\\n  which is the worst case.\\n\\n  When srcSize can be bound by a known and rather \\\"small\\\" value,\\n  this fact can be used to provide a tighter estimation\\n  because the CCtx compression context will need less memory.\\n  This tighter estimation can be provided by more advanced functions\\n  ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),\\n  and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().\\n  Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.\\n\\n  Note : only single-threaded compression is 
supported.\\n  ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.\\n\\n  Note 2 : ZSTD_estimateCCtxSize* functions are not compatible with the Block-Level Sequence Producer API at this time.\\n  Size estimates assume that no external sequence producer is registered.\"]\n    pub fn ZSTD_estimateCCtxSize(\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateCCtxSize_usingCParams(\n        cParams: ZSTD_compressionParameters,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateCCtxSize_usingCCtxParams(\n        params: *const ZSTD_CCtx_params,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateDCtxSize() -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_estimateCStreamSize() :\\n  ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.\\n  It will also consider src size to be arbitrarily \\\"large\\\", which is worst case.\\n  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.\\n  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.\\n  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. 
This function will return an error code if ZSTD_c_nbWorkers is >= 1.\\n  Note : CStream size estimation is only correct for single-threaded compression.\\n  ZSTD_DStream memory budget depends on window Size.\\n  This information can be passed manually, using ZSTD_estimateDStreamSize,\\n  or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();\\n  Note : if streaming is init with function ZSTD_init?Stream_usingDict(),\\n         an internal ?Dict will be created, which additional size is not estimated here.\\n         In this case, get total size by adding ZSTD_estimate?DictSize\\n  Note 2 : only single-threaded compression is supported.\\n  ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.\\n  Note 3 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.\\n  Size estimates assume that no external sequence producer is registered.\"]\n    pub fn ZSTD_estimateCStreamSize(\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateCStreamSize_usingCParams(\n        cParams: ZSTD_compressionParameters,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateCStreamSize_usingCCtxParams(\n        params: *const ZSTD_CCtx_params,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateDStreamSize(windowSize: usize) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateDStreamSize_fromFrame(\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_estimate?DictSize() :\\n  ZSTD_estimateCDictSize() will bet that src size is relatively \\\"small\\\", and content is copied, like ZSTD_createCDict().\\n  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().\\n  Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.\"]\n  
  pub fn ZSTD_estimateCDictSize(\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateCDictSize_advanced(\n        dictSize: usize,\n        cParams: ZSTD_compressionParameters,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_estimateDDictSize(\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initStatic*() :\\n  Initialize an object using a pre-allocated fixed-size buffer.\\n  workspace: The memory area to emplace the object into.\\n             Provided pointer *must be 8-bytes aligned*.\\n             Buffer must outlive object.\\n  workspaceSize: Use ZSTD_estimate*Size() to determine\\n                 how large workspace must be to support target scenario.\\n @return : pointer to object (same address as workspace, just different type),\\n           or NULL if error (size too small, incorrect alignment, etc.)\\n  Note : zstd will never resize nor malloc() when using a static buffer.\\n         If the object requires more memory than available,\\n         zstd will just error out (typically ZSTD_error_memory_allocation).\\n  Note 2 : there is no corresponding \\\"free\\\" function.\\n           Since workspace is allocated externally, it must be freed externally too.\\n  Note 3 : cParams : use ZSTD_getCParams() to convert a compression level\\n           into its associated cParams.\\n  Limitation 1 : currently not compatible with internal dictionary creation, triggered by\\n                 ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().\\n  Limitation 2 : static cctx currently not compatible with multi-threading.\\n  Limitation 3 : static dctx is incompatible with legacy support.\"]\n    pub fn ZSTD_initStaticCCtx(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n    ) -> *mut 
ZSTD_CCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_initStaticCStream(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n    ) -> *mut ZSTD_CStream;\n}\nextern \"C\" {\n    pub fn ZSTD_initStaticDCtx(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n    ) -> *mut ZSTD_DCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_initStaticDStream(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n    ) -> *mut ZSTD_DStream;\n}\nextern \"C\" {\n    pub fn ZSTD_initStaticCDict(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n        cParams: ZSTD_compressionParameters,\n    ) -> *const ZSTD_CDict;\n}\nextern \"C\" {\n    pub fn ZSTD_initStaticDDict(\n        workspace: *mut ::core::ffi::c_void,\n        workspaceSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n    ) -> *const ZSTD_DDict;\n}\n#[doc = \" Custom memory allocation :\\n  These prototypes make it possible to pass your own allocation/free functions.\\n  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.\\n  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.\"]\npub type ZSTD_allocFunction = ::core::option::Option<\n    unsafe extern \"C\" fn(\n        opaque: *mut ::core::ffi::c_void,\n        size: usize,\n    ) -> *mut ::core::ffi::c_void,\n>;\npub type ZSTD_freeFunction = ::core::option::Option<\n    unsafe extern \"C\" fn(\n        opaque: *mut ::core::ffi::c_void,\n        address: *mut ::core::ffi::c_void,\n    ),\n>;\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_customMem {\n    pub customAlloc: 
ZSTD_allocFunction,\n    pub customFree: ZSTD_freeFunction,\n    pub opaque: *mut ::core::ffi::c_void,\n}\nextern \"C\" {\n    #[doc = \"< this constant defers to stdlib's functions\"]\n    pub static ZSTD_defaultCMem: ZSTD_customMem;\n}\nextern \"C\" {\n    pub fn ZSTD_createCCtx_advanced(\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_CCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_createCStream_advanced(\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_CStream;\n}\nextern \"C\" {\n    pub fn ZSTD_createDCtx_advanced(\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_DCtx;\n}\nextern \"C\" {\n    pub fn ZSTD_createDStream_advanced(\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_DStream;\n}\nextern \"C\" {\n    pub fn ZSTD_createCDict_advanced(\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n        cParams: ZSTD_compressionParameters,\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_CDict;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct POOL_ctx_s {\n    _unused: [u8; 0],\n}\n#[doc = \" Thread pool :\\n  These prototypes make it possible to share a thread pool among multiple compression contexts.\\n  This can limit resources for applications with multiple threads where each one uses\\n  a threaded compression mode (via ZSTD_c_nbWorkers parameter).\\n  ZSTD_createThreadPool creates a new thread pool with a given number of threads.\\n  Note that the lifetime of such pool must exist while being used.\\n  ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value\\n  to use an internal thread pool).\\n  ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.\"]\npub type ZSTD_threadPool = POOL_ctx_s;\nextern \"C\" {\n    pub fn ZSTD_createThreadPool(numThreads: usize) -> *mut ZSTD_threadPool;\n}\nextern \"C\" {\n    pub fn ZSTD_freeThreadPool(pool: *mut 
ZSTD_threadPool);\n}\nextern \"C\" {\n    pub fn ZSTD_CCtx_refThreadPool(\n        cctx: *mut ZSTD_CCtx,\n        pool: *mut ZSTD_threadPool,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_createCDict_advanced2(\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n        cctxParams: *const ZSTD_CCtx_params,\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_CDict;\n}\nextern \"C\" {\n    pub fn ZSTD_createDDict_advanced(\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n        customMem: ZSTD_customMem,\n    ) -> *mut ZSTD_DDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_createCDict_byReference() :\\n  Create a digested dictionary for compression\\n  Dictionary content is just referenced, not duplicated.\\n  As a consequence, `dictBuffer` **must** outlive CDict,\\n  and its content must remain unmodified throughout the lifetime of CDict.\\n  note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef\"]\n    pub fn ZSTD_createCDict_byReference(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> *mut ZSTD_CDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getCParams() :\\n @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.\\n `estimatedSrcSize` value is optional, select 0 if not known\"]\n    pub fn ZSTD_getCParams(\n        compressionLevel: ::core::ffi::c_int,\n        estimatedSrcSize: ::core::ffi::c_ulonglong,\n        dictSize: usize,\n    ) -> ZSTD_compressionParameters;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_getParams() :\\n  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.\\n  All fields of 
`ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0\"]\n    pub fn ZSTD_getParams(\n        compressionLevel: ::core::ffi::c_int,\n        estimatedSrcSize: ::core::ffi::c_ulonglong,\n        dictSize: usize,\n    ) -> ZSTD_parameters;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_checkCParams() :\\n  Ensure param values remain within authorized range.\\n @return 0 on success, or an error code (can be checked with ZSTD_isError())\"]\n    pub fn ZSTD_checkCParams(params: ZSTD_compressionParameters) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_adjustCParams() :\\n  optimize params for a given `srcSize` and `dictSize`.\\n `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.\\n `dictSize` must be `0` when there is no dictionary.\\n  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.\\n  This function never fails (wide contract)\"]\n    pub fn ZSTD_adjustCParams(\n        cPar: ZSTD_compressionParameters,\n        srcSize: ::core::ffi::c_ulonglong,\n        dictSize: usize,\n    ) -> ZSTD_compressionParameters;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setCParams() :\\n  Set all parameters provided within @p cparams into the working @p cctx.\\n  Note : if modifying parameters during compression (MT mode only),\\n         note that changes to the .windowLog parameter will be ignored.\\n @return 0 on success, or an error code (can be checked with ZSTD_isError()).\\n         On failure, no parameters are updated.\"]\n    pub fn ZSTD_CCtx_setCParams(\n        cctx: *mut ZSTD_CCtx,\n        cparams: ZSTD_compressionParameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setFParams() :\\n  Set all parameters provided within @p fparams into the working @p cctx.\\n @return 0 on success, or an error code (can be checked with ZSTD_isError()).\"]\n    pub fn ZSTD_CCtx_setFParams(\n        cctx: *mut ZSTD_CCtx,\n        fparams: ZSTD_frameParameters,\n    ) -> 
usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setParams() :\\n  Set all parameters provided within @p params into the working @p cctx.\\n @return 0 on success, or an error code (can be checked with ZSTD_isError()).\"]\n    pub fn ZSTD_CCtx_setParams(\n        cctx: *mut ZSTD_CCtx,\n        params: ZSTD_parameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compress_advanced() :\\n  Note : this function is now DEPRECATED.\\n         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.\\n  This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_compress_advanced(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        params: ZSTD_parameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compress_usingCDict_advanced() :\\n  Note : this function is now DEPRECATED.\\n         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.\\n  This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_compress_usingCDict_advanced(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        cdict: *const ZSTD_CDict,\n        fParams: ZSTD_frameParameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_loadDictionary_byReference() :\\n  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.\\n  It saves some memory, but also requires that `dict` outlives its usage within `cctx`\"]\n    pub fn ZSTD_CCtx_loadDictionary_byReference(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> 
usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_loadDictionary_advanced() :\\n  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over\\n  how to load the dictionary (by copy ? by reference ?)\\n  and how to interpret it (automatic ? force raw mode ? full mode only ?)\"]\n    pub fn ZSTD_CCtx_loadDictionary_advanced(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_refPrefix_advanced() :\\n  Same as ZSTD_CCtx_refPrefix(), but gives finer control over\\n  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)\"]\n    pub fn ZSTD_CCtx_refPrefix_advanced(\n        cctx: *mut ZSTD_CCtx,\n        prefix: *const ::core::ffi::c_void,\n        prefixSize: usize,\n        dictContentType: ZSTD_dictContentType_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_getParameter() :\\n  Get the requested compression parameter value, selected by enum ZSTD_cParameter,\\n  and store it into int* value.\\n @return : 0, or an error code (which can be tested with ZSTD_isError()).\"]\n    pub fn ZSTD_CCtx_getParameter(\n        cctx: *const ZSTD_CCtx,\n        param: ZSTD_cParameter,\n        value: *mut ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_params :\\n  Quick howto :\\n  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure\\n  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into\\n                                     an existing ZSTD_CCtx_params structure.\\n                                     This is similar to\\n                                     ZSTD_CCtx_setParameter().\\n  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to\\n                                    an existing CCtx.\\n                                    These 
parameters will be applied to\\n                                    all subsequent frames.\\n  - ZSTD_compressStream2() : Do compression using the CCtx.\\n  - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.\\n\\n  This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()\\n  for static allocation of CCtx for single-threaded compression.\"]\n    pub fn ZSTD_createCCtxParams() -> *mut ZSTD_CCtx_params;\n}\nextern \"C\" {\n    pub fn ZSTD_freeCCtxParams(params: *mut ZSTD_CCtx_params) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_reset() :\\n  Reset params to default values.\"]\n    pub fn ZSTD_CCtxParams_reset(params: *mut ZSTD_CCtx_params) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_init() :\\n  Initializes the compression parameters of cctxParams according to\\n  compression level. All other parameters are reset to their default values.\"]\n    pub fn ZSTD_CCtxParams_init(\n        cctxParams: *mut ZSTD_CCtx_params,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_init_advanced() :\\n  Initializes the compression and frame parameters of cctxParams according to\\n  params. 
All other parameters are reset to their default values.\"]\n    pub fn ZSTD_CCtxParams_init_advanced(\n        cctxParams: *mut ZSTD_CCtx_params,\n        params: ZSTD_parameters,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_setParameter() : Requires v1.4.0+\\n  Similar to ZSTD_CCtx_setParameter.\\n  Set one compression parameter, selected by enum ZSTD_cParameter.\\n  Parameters must be applied to a ZSTD_CCtx using\\n  ZSTD_CCtx_setParametersUsingCCtxParams().\\n @result : a code representing success or failure (which can be tested with\\n           ZSTD_isError()).\"]\n    pub fn ZSTD_CCtxParams_setParameter(\n        params: *mut ZSTD_CCtx_params,\n        param: ZSTD_cParameter,\n        value: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtxParams_getParameter() :\\n Similar to ZSTD_CCtx_getParameter.\\n Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.\\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\"]\n    pub fn ZSTD_CCtxParams_getParameter(\n        params: *const ZSTD_CCtx_params,\n        param: ZSTD_cParameter,\n        value: *mut ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_CCtx_setParametersUsingCCtxParams() :\\n  Apply a set of ZSTD_CCtx_params to the compression context.\\n  This can be done even after compression is started,\\n    if nbWorkers==0, this will have no impact until a new compression is started.\\n    if nbWorkers>=1, new parameters will be picked up at next job,\\n       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).\"]\n    pub fn ZSTD_CCtx_setParametersUsingCCtxParams(\n        cctx: *mut ZSTD_CCtx,\n        params: *const ZSTD_CCtx_params,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_compressStream2_simpleArgs() :\\n  Same as ZSTD_compressStream2(),\\n  but using only integral types as arguments.\\n  This variant 
might be helpful for binders from dynamic languages\\n  which have troubles handling structures containing memory pointers.\"]\n    pub fn ZSTD_compressStream2_simpleArgs(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        dstPos: *mut usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        srcPos: *mut usize,\n        endOp: ZSTD_EndDirective,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_isFrame() :\\n  Tells if the content of `buffer` starts with a valid Frame Identifier.\\n  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.\\n  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.\\n  Note 3 : Skippable Frame Identifiers are considered valid.\"]\n    pub fn ZSTD_isFrame(\n        buffer: *const ::core::ffi::c_void,\n        size: usize,\n    ) -> ::core::ffi::c_uint;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_createDDict_byReference() :\\n  Create a digested dictionary, ready to start decompression operation without startup delay.\\n  Dictionary content is referenced, and therefore stays in dictBuffer.\\n  It is important that dictBuffer outlives DDict,\\n  it must remain read accessible throughout the lifetime of DDict\"]\n    pub fn ZSTD_createDDict_byReference(\n        dictBuffer: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> *mut ZSTD_DDict;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_loadDictionary_byReference() :\\n  Same as ZSTD_DCtx_loadDictionary(),\\n  but references `dict` content instead of copying it into `dctx`.\\n  This saves memory if `dict` remains around.,\\n  However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression.\"]\n    pub fn ZSTD_DCtx_loadDictionary_byReference(\n        dctx: *mut ZSTD_DCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n  
  #[doc = \" ZSTD_DCtx_loadDictionary_advanced() :\\n  Same as ZSTD_DCtx_loadDictionary(),\\n  but gives direct control over\\n  how to load the dictionary (by copy ? by reference ?)\\n  and how to interpret it (automatic ? force raw mode ? full mode only ?).\"]\n    pub fn ZSTD_DCtx_loadDictionary_advanced(\n        dctx: *mut ZSTD_DCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        dictLoadMethod: ZSTD_dictLoadMethod_e,\n        dictContentType: ZSTD_dictContentType_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_refPrefix_advanced() :\\n  Same as ZSTD_DCtx_refPrefix(), but gives finer control over\\n  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)\"]\n    pub fn ZSTD_DCtx_refPrefix_advanced(\n        dctx: *mut ZSTD_DCtx,\n        prefix: *const ::core::ffi::c_void,\n        prefixSize: usize,\n        dictContentType: ZSTD_dictContentType_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_setMaxWindowSize() :\\n  Refuses allocating internal buffers for frames requiring a window size larger than provided limit.\\n  This protects a decoder context from reserving too much memory for itself (potential attack scenario).\\n  This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.\\n  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)\\n @return : 0, or an error code (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_DCtx_setMaxWindowSize(\n        dctx: *mut ZSTD_DCtx,\n        maxWindowSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_getParameter() :\\n  Get the requested decompression parameter value, selected by enum ZSTD_dParameter,\\n  and store it into int* value.\\n @return : 0, or an error code (which can be tested with ZSTD_isError()).\"]\n    pub fn ZSTD_DCtx_getParameter(\n        dctx: *mut ZSTD_DCtx,\n        
param: ZSTD_dParameter,\n        value: *mut ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_DCtx_setFormat() :\\n  This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter().\\n  Instruct the decoder context about what kind of data to decode next.\\n  This instruction is mandatory to decode data without a fully-formed header,\\n  such ZSTD_f_zstd1_magicless for example.\\n @return : 0, or an error code (which can be tested using ZSTD_isError()).\"]\n    pub fn ZSTD_DCtx_setFormat(\n        dctx: *mut ZSTD_DCtx,\n        format: ZSTD_format_e,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_decompressStream_simpleArgs() :\\n  Same as ZSTD_decompressStream(),\\n  but using only integral types as arguments.\\n  This can be helpful for binders from dynamic languages\\n  which have troubles handling structures containing memory pointers.\"]\n    pub fn ZSTD_decompressStream_simpleArgs(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        dstPos: *mut usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        srcPos: *mut usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initCStream_srcSize() :\\n This function is DEPRECATED, and equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\\n     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\\n     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\\n\\n pledgedSrcSize must be correct. If it is not known at init time, use\\n ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,\\n \\\"0\\\" also disables frame content size field. 
It may be enabled in the future.\\n This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_initCStream_srcSize(\n        zcs: *mut ZSTD_CStream,\n        compressionLevel: ::core::ffi::c_int,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initCStream_usingDict() :\\n This function is DEPRECATED, and is equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\\n     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);\\n\\n Creates of an internal CDict (incompatible with static CCtx), except if\\n dict == NULL or dictSize < 8, in which case no dict is used.\\n Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if\\n it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.\\n This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_initCStream_usingDict(\n        zcs: *mut ZSTD_CStream,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initCStream_advanced() :\\n This function is DEPRECATED, and is equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_setParams(zcs, params);\\n     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\\n     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);\\n\\n dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.\\n pledgedSrcSize must be correct.\\n If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.\\n This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_initCStream_advanced(\n        zcs: *mut ZSTD_CStream,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        params: ZSTD_parameters,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" 
ZSTD_initCStream_usingCDict() :\\n This function is DEPRECATED, and equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_refCDict(zcs, cdict);\\n\\n note : cdict will just be referenced, and must outlive compression session\\n This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_initCStream_usingCDict(\n        zcs: *mut ZSTD_CStream,\n        cdict: *const ZSTD_CDict,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_initCStream_usingCDict_advanced() :\\n   This function is DEPRECATED, and is equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_setFParams(zcs, fParams);\\n     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\\n     ZSTD_CCtx_refCDict(zcs, cdict);\\n\\n same as ZSTD_initCStream_usingCDict(), with control over frame parameters.\\n pledgedSrcSize must be correct. If srcSize is not known at init time, use\\n value ZSTD_CONTENTSIZE_UNKNOWN.\\n This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_initCStream_usingCDict_advanced(\n        zcs: *mut ZSTD_CStream,\n        cdict: *const ZSTD_CDict,\n        fParams: ZSTD_frameParameters,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_resetCStream() :\\n This function is DEPRECATED, and is equivalent to:\\n     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\\n     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\\n Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but\\n       ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be\\n       explicitly specified.\\n\\n  start a new frame, using same parameters from previous frame.\\n  This is typically useful to skip dictionary loading stage, since it will re-use it in-place.\\n  Note that zcs must be init at least once before using ZSTD_resetCStream().\\n  If pledgedSrcSize is not known at reset time, use macro 
ZSTD_CONTENTSIZE_UNKNOWN.\\n  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.\\n  For the time being, pledgedSrcSize==0 is interpreted as \\\"srcSize unknown\\\" for compatibility with older programs,\\n  but it will change to mean \\\"empty\\\" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.\\n @return : 0, or an error code (which can be tested using ZSTD_isError())\\n  This prototype will generate compilation warnings.\"]\n    pub fn ZSTD_resetCStream(\n        zcs: *mut ZSTD_CStream,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\n#[repr(C)]\n#[derive(Debug, Copy, Clone)]\npub struct ZSTD_frameProgression {\n    pub ingested: ::core::ffi::c_ulonglong,\n    pub consumed: ::core::ffi::c_ulonglong,\n    pub produced: ::core::ffi::c_ulonglong,\n    pub flushed: ::core::ffi::c_ulonglong,\n    pub currentJobID: ::core::ffi::c_uint,\n    pub nbActiveWorkers: ::core::ffi::c_uint,\n}\nextern \"C\" {\n    pub fn ZSTD_getFrameProgression(\n        cctx: *const ZSTD_CCtx,\n    ) -> ZSTD_frameProgression;\n}\nextern \"C\" {\n    #[doc = \" ZSTD_toFlushNow() :\\n  Tell how many bytes are ready to be flushed immediately.\\n  Useful for multithreading scenarios (nbWorkers >= 1).\\n  Probe the oldest active job, defined as oldest job not yet entirely flushed,\\n  and check its output buffer.\\n @return : amount of data stored in oldest job and ready to be flushed immediately.\\n  if @return == 0, it means either :\\n  + there is no active job (could be checked with ZSTD_frameProgression()), or\\n  + oldest job is still actively compressing data,\\n    but everything it has produced has also been flushed so far,\\n    therefore flush speed is limited by production speed of oldest job\\n    irrespective of the speed of concurrent (and newer) jobs.\"]\n    pub fn ZSTD_toFlushNow(cctx: *mut ZSTD_CCtx) -> usize;\n}\nextern \"C\" {\n    #[doc = \" This function is deprecated, and is 
equivalent to:\\n\\n     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\\n     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);\\n\\n note: no dictionary will be used if dict == NULL or dictSize < 8\"]\n    pub fn ZSTD_initDStream_usingDict(\n        zds: *mut ZSTD_DStream,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" This function is deprecated, and is equivalent to:\\n\\n     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\\n     ZSTD_DCtx_refDDict(zds, ddict);\\n\\n note : ddict is referenced, it must outlive decompression session\"]\n    pub fn ZSTD_initDStream_usingDDict(\n        zds: *mut ZSTD_DStream,\n        ddict: *const ZSTD_DDict,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \" This function is deprecated, and is equivalent to:\\n\\n     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\\n\\n re-use decompression parameters from previous init; saves dictionary loading\"]\n    pub fn ZSTD_resetDStream(zds: *mut ZSTD_DStream) -> usize;\n}\npub type ZSTD_sequenceProducer_F = ::core::option::Option<\n    unsafe extern \"C\" fn(\n        sequenceProducerState: *mut ::core::ffi::c_void,\n        outSeqs: *mut ZSTD_Sequence,\n        outSeqsCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n        windowSize: usize,\n    ) -> usize,\n>;\nextern \"C\" {\n    #[doc = \" ZSTD_registerSequenceProducer() :\\n Instruct zstd to use a block-level external sequence producer function.\\n\\n The sequenceProducerState must be initialized by the caller, and the caller is\\n responsible for managing its lifetime. This parameter is sticky across\\n compressions. 
It will remain set until the user explicitly resets compression\\n parameters.\\n\\n Sequence producer registration is considered to be an \\\"advanced parameter\\\",\\n part of the \\\"advanced API\\\". This means it will only have an effect on compression\\n APIs which respect advanced parameters, such as compress2() and compressStream2().\\n Older compression APIs such as compressCCtx(), which predate the introduction of\\n \\\"advanced parameters\\\", will ignore any external sequence producer setting.\\n\\n The sequence producer can be \\\"cleared\\\" by registering a NULL function pointer. This\\n removes all limitations described above in the \\\"LIMITATIONS\\\" section of the API docs.\\n\\n The user is strongly encouraged to read the full API documentation (above) before\\n calling this function.\"]\n    pub fn ZSTD_registerSequenceProducer(\n        cctx: *mut ZSTD_CCtx,\n        sequenceProducerState: *mut ::core::ffi::c_void,\n        sequenceProducer: ZSTD_sequenceProducer_F,\n    );\n}\nextern \"C\" {\n    #[doc = \"Buffer-less streaming compression (synchronous mode)\\n\\nA ZSTD_CCtx object is required to track streaming operations.\\nUse ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.\\nZSTD_CCtx object can be re-used multiple times within successive compression operations.\\n\\nStart by initializing a context.\\nUse ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.\\n\\nThen, consume your input using ZSTD_compressContinue().\\nThere are some important considerations to keep in mind when using this advanced function :\\n- ZSTD_compressContinue() has no internal buffer. 
It uses externally provided buffers only.\\n- Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.\\n- Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.\\nWorst case evaluation is provided by ZSTD_compressBound().\\nZSTD_compressContinue() doesn't guarantee recover after a failed compression.\\n- ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).\\nIt remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks)\\n- ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.\\nIn which case, it will \\\"discard\\\" the relevant memory section from its history.\\n\\nFinish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.\\nIt's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.\\nWithout last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.\\n\\n`ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.\"]\n    pub fn ZSTD_compressBegin(\n        cctx: *mut ZSTD_CCtx,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBegin_usingDict(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        compressionLevel: ::core::ffi::c_int,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBegin_usingCDict(\n        cctx: *mut ZSTD_CCtx,\n        cdict: *const ZSTD_CDict,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_copyCCtx(\n        cctx: *mut ZSTD_CCtx,\n        preparedCCtx: *const ZSTD_CCtx,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressContinue(\n        cctx: *mut 
ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressEnd(\n        cctx: *mut ZSTD_CCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBegin_advanced(\n        cctx: *mut ZSTD_CCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n        params: ZSTD_parameters,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBegin_usingCDict_advanced(\n        cctx: *mut ZSTD_CCtx,\n        cdict: *const ZSTD_CDict,\n        fParams: ZSTD_frameParameters,\n        pledgedSrcSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    #[doc = \"Buffer-less streaming decompression (synchronous mode)\\n\\nA ZSTD_DCtx object is required to track streaming operations.\\nUse ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.\\nA ZSTD_DCtx object can be re-used multiple times.\\n\\nFirst typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().\\nFrame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.\\nData fragment must be large enough to ensure successful decoding.\\n`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.\\nresult  : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.\\n>0 : `srcSize` is too small, please provide at least result bytes on next attempt.\\nerrorCode, which can be tested using ZSTD_isError().\\n\\nIt fills a ZSTD_frameHeader structure with important information to correctly decode the frame,\\nsuch as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).\\nNote that these values could be wrong, either 
because of data corruption, or because a 3rd party deliberately spoofs false information.\\nAs a consequence, check that values remain within valid application range.\\nFor example, do not allocate memory blindly, check that `windowSize` is within expectation.\\nEach application can set its own limits, depending on local restrictions.\\nFor extended interoperability, it is recommended to support `windowSize` of at least 8 MB.\\n\\nZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.\\nZSTD_decompressContinue() is very sensitive to contiguity,\\nif 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,\\nor that previous contiguous segment is large enough to properly handle maximum back-reference distance.\\nThere are multiple ways to guarantee this condition.\\n\\nThe most memory efficient way is to use a round buffer of sufficient size.\\nSufficient size is determined by invoking ZSTD_decodingBufferSize_min(),\\nwhich can return an error code if required value is too large for current system (in 32-bits mode).\\nIn a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,\\nup to the moment there is not enough room left in the buffer to guarantee decoding another full block,\\nwhich maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.\\nAt which point, decoding can resume from the beginning of the buffer.\\nNote that already decoded data stored in the buffer should be flushed before being overwritten.\\n\\nThere are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.\\n\\nFinally, if you control the compression process, you can also ignore all buffer size rules,\\nas long as the encoder and decoder progress in \\\"lock-step\\\",\\naka use exactly the same buffer sizes, break contiguity at the same place, etc.\\n\\nOnce buffers 
are setup, start decompression, with ZSTD_decompressBegin().\\nIf decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().\\n\\nThen use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.\\nZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().\\nZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.\\n\\nresult of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).\\nIt can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.\\nIt can also be an error code, which can be tested with ZSTD_isError().\\n\\nA frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.\\nContext can then be reset to start a new decompression.\\n\\nNote : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().\\nThis information is not required to properly decode a frame.\\n\\n== Special case : skippable frames ==\\n\\nSkippable frames allow integration of user-defined data into a flow of concatenated frames.\\nSkippable frames will be ignored (skipped) by decompressor.\\nThe format of skippable frames is as follows :\\na) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F\\nb) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits\\nc) Frame Content - any content (User Data) of length equal to Frame Size\\nFor skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.\\nFor skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.\"]\n    pub fn ZSTD_decodingBufferSize_min(\n        windowSize: ::core::ffi::c_ulonglong,\n        frameContentSize: ::core::ffi::c_ulonglong,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_decompressBegin(dctx: *mut ZSTD_DCtx) -> usize;\n}\nextern \"C\" {\n   
 pub fn ZSTD_decompressBegin_usingDict(\n        dctx: *mut ZSTD_DCtx,\n        dict: *const ::core::ffi::c_void,\n        dictSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_decompressBegin_usingDDict(\n        dctx: *mut ZSTD_DCtx,\n        ddict: *const ZSTD_DDict,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_nextSrcSizeToDecompress(dctx: *mut ZSTD_DCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_decompressContinue(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_copyDCtx(dctx: *mut ZSTD_DCtx, preparedDCtx: *const ZSTD_DCtx);\n}\n#[repr(u32)]\n#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]\npub enum ZSTD_nextInputType_e {\n    ZSTDnit_frameHeader = 0,\n    ZSTDnit_blockHeader = 1,\n    ZSTDnit_block = 2,\n    ZSTDnit_lastBlock = 3,\n    ZSTDnit_checksum = 4,\n    ZSTDnit_skippableFrame = 5,\n}\nextern \"C\" {\n    pub fn ZSTD_nextInputType(dctx: *mut ZSTD_DCtx) -> ZSTD_nextInputType_e;\n}\nextern \"C\" {\n    #[doc = \"This API is deprecated in favor of the regular compression API.\\nYou can get the frame header down to 2 bytes by setting:\\n- ZSTD_c_format = ZSTD_f_zstd1_magicless\\n- ZSTD_c_contentSizeFlag = 0\\n- ZSTD_c_checksumFlag = 0\\n- ZSTD_c_dictIDFlag = 0\\n\\nThis API is not as well tested as our normal API, so we recommend not using it.\\nWe will be removing it in a future version. 
If the normal API doesn't provide\\nthe functionality you need, please open a GitHub issue.\\n\\nBlock functions produce and decode raw zstd blocks, without frame metadata.\\nFrame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).\\nBut users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.\\n\\nA few rules to respect :\\n- Compressing and decompressing require a context structure\\n+ Use ZSTD_createCCtx() and ZSTD_createDCtx()\\n- It is necessary to init context before starting\\n+ compression : any ZSTD_compressBegin*() variant, including with dictionary\\n+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary\\n- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB\\n+ If input is larger than a block size, it's necessary to split input data into multiple blocks\\n+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.\\nFrame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.\\n- When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !\\n===> In which case, nothing is produced into `dst` !\\n+ User __must__ test for such outcome and deal directly with uncompressed data\\n+ A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.\\nDoing so would mess up with statistics history, leading to potential data corruption.\\n+ ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!\\n+ In case of multiple successive blocks, should some of them be uncompressed,\\ndecoder must be informed of their existence in order to follow proper history.\\nUse ZSTD_insertBlock() for such a case.\"]\n    pub fn ZSTD_getBlockSize(cctx: *const ZSTD_CCtx) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_compressBlock(\n        cctx: *mut ZSTD_CCtx,\n        
dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_decompressBlock(\n        dctx: *mut ZSTD_DCtx,\n        dst: *mut ::core::ffi::c_void,\n        dstCapacity: usize,\n        src: *const ::core::ffi::c_void,\n        srcSize: usize,\n    ) -> usize;\n}\nextern \"C\" {\n    pub fn ZSTD_insertBlock(\n        dctx: *mut ZSTD_DCtx,\n        blockStart: *const ::core::ffi::c_void,\n        blockSize: usize,\n    ) -> usize;\n}\n"
  },
  {
    "path": "zstd-safe/zstd-sys/src/lib.rs",
    "content": "#![allow(non_upper_case_globals)]\n#![allow(non_camel_case_types)]\n#![allow(non_snake_case)]\n#![no_std]\n//! Low-level bindings to the [zstd] library.\n//!\n//! [zstd]: https://facebook.github.io/zstd/\n\n#[cfg(target_arch = \"wasm32\")]\nextern crate alloc;\n\n#[cfg(target_arch = \"wasm32\")]\nmod wasm_shim;\n\n// If running bindgen, we'll end up with the correct bindings anyway.\n#[cfg(feature = \"bindgen\")]\ninclude!(concat!(env!(\"OUT_DIR\"), \"/bindings.rs\"));\n\n// The bindings used depend on a few feature flags.\n#[cfg(all(not(feature = \"experimental\"), not(feature = \"bindgen\")))]\ninclude!(\"bindings_zstd.rs\");\n\n#[cfg(all(\n    not(feature = \"experimental\"),\n    feature = \"zdict_builder\",\n    not(feature = \"bindgen\")\n))]\ninclude!(\"bindings_zdict.rs\");\n\n#[cfg(all(feature = \"experimental\", not(feature = \"bindgen\")))]\ninclude!(\"bindings_zstd_experimental.rs\");\n\n#[cfg(all(\n    feature = \"experimental\",\n    feature = \"zdict_builder\",\n    not(feature = \"bindgen\")\n))]\ninclude!(\"bindings_zdict_experimental.rs\");\n\n#[cfg(all(feature = \"seekable\", not(feature = \"bindgen\")))]\ninclude!(\"bindings_zstd_seekable.rs\");\n"
  },
  {
    "path": "zstd-safe/zstd-sys/src/wasm_shim.rs",
    "content": "use alloc::alloc::{alloc, alloc_zeroed, dealloc, Layout};\nuse core::ffi::{c_int, c_void};\n\nconst USIZE_ALIGN: usize = core::mem::align_of::<usize>();\nconst USIZE_SIZE: usize = core::mem::size_of::<usize>();\n\n#[no_mangle]\npub extern \"C\" fn rust_zstd_wasm_shim_qsort(\n    base: *mut c_void,\n    n_items: usize,\n    size: usize,\n    compar: extern \"C\" fn(*const c_void, *const c_void) -> c_int,\n) {\n    unsafe {\n        match size {\n            1 => qsort::<1>(base, n_items, compar),\n            2 => qsort::<2>(base, n_items, compar),\n            4 => qsort::<4>(base, n_items, compar),\n            8 => qsort::<8>(base, n_items, compar),\n            16 => qsort::<16>(base, n_items, compar),\n            _ => panic!(\"Unsupported qsort item size\"),\n        }\n    }\n}\n\nunsafe fn qsort<const N: usize>(\n    base: *mut c_void,\n    n_items: usize,\n    compar: extern \"C\" fn(*const c_void, *const c_void) -> c_int,\n) {\n    let base: &mut [[u8; N]] =\n        core::slice::from_raw_parts_mut(base as *mut [u8; N], n_items);\n    base.sort_unstable_by(|a, b| {\n        match compar(a.as_ptr() as *const c_void, b.as_ptr() as *const c_void)\n        {\n            ..=-1 => core::cmp::Ordering::Less,\n            0 => core::cmp::Ordering::Equal,\n            1.. 
=> core::cmp::Ordering::Greater,\n        }\n    });\n}\n\n#[no_mangle]\npub extern \"C\" fn rust_zstd_wasm_shim_malloc(size: usize) -> *mut c_void {\n    wasm_shim_alloc::<false>(size)\n}\n\n#[no_mangle]\npub extern \"C\" fn rust_zstd_wasm_shim_memcmp(\n    str1: *const c_void,\n    str2: *const c_void,\n    n: usize,\n) -> i32 {\n    // Safety: function contracts requires str1 and str2 at least `n`-long.\n    unsafe {\n        let str1: &[u8] = core::slice::from_raw_parts(str1 as *const u8, n);\n        let str2: &[u8] = core::slice::from_raw_parts(str2 as *const u8, n);\n        match str1.cmp(str2) {\n            core::cmp::Ordering::Less => -1,\n            core::cmp::Ordering::Equal => 0,\n            core::cmp::Ordering::Greater => 1,\n        }\n    }\n}\n\n#[no_mangle]\npub extern \"C\" fn rust_zstd_wasm_shim_calloc(\n    nmemb: usize,\n    size: usize,\n) -> *mut c_void {\n    // note: calloc expects the allocation to be zeroed\n    wasm_shim_alloc::<true>(nmemb * size)\n}\n\n#[inline]\nfn wasm_shim_alloc<const ZEROED: bool>(size: usize) -> *mut c_void {\n    // in order to recover the size upon free, we store the size below the allocation\n    // special alignment is never requested via the malloc API,\n    // so it's not stored, and usize-alignment is used\n    // memory layout: [size] [allocation]\n\n    let full_alloc_size = size + USIZE_SIZE;\n\n    unsafe {\n        let layout =\n            Layout::from_size_align_unchecked(full_alloc_size, USIZE_ALIGN);\n\n        let ptr = if ZEROED {\n            alloc_zeroed(layout)\n        } else {\n            alloc(layout)\n        };\n\n        // SAFETY: ptr is usize-aligned and we've allocated sufficient memory\n        ptr.cast::<usize>().write(full_alloc_size);\n\n        ptr.add(USIZE_SIZE).cast()\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn rust_zstd_wasm_shim_free(ptr: *mut c_void) {\n    // the layout for the allocation needs to be recovered for dealloc\n    // - the size must be recovered 
from directly below the allocation\n    // - the alignment will always be USIZE_ALIGN\n\n    let alloc_ptr = ptr.sub(USIZE_SIZE);\n    // SAFETY: the allocation routines must uphold having a valid usize below the provided pointer\n    let full_alloc_size = alloc_ptr.cast::<usize>().read();\n\n    let layout =\n        Layout::from_size_align_unchecked(full_alloc_size, USIZE_ALIGN);\n    dealloc(alloc_ptr.cast(), layout);\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn rust_zstd_wasm_shim_memcpy(\n    dest: *mut c_void,\n    src: *const c_void,\n    n: usize,\n) -> *mut c_void {\n    core::ptr::copy_nonoverlapping(src as *const u8, dest as *mut u8, n);\n    dest\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn rust_zstd_wasm_shim_memmove(\n    dest: *mut c_void,\n    src: *const c_void,\n    n: usize,\n) -> *mut c_void {\n    core::ptr::copy(src as *const u8, dest as *mut u8, n);\n    dest\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn rust_zstd_wasm_shim_memset(\n    dest: *mut c_void,\n    c: c_int,\n    n: usize,\n) -> *mut c_void {\n    core::ptr::write_bytes(dest as *mut u8, c as u8, n);\n    dest\n}\n"
  },
  {
    "path": "zstd-safe/zstd-sys/test_it.sh",
    "content": "#!/bin/sh\nfor EXP in \"experimental\" \"\"; do\n    for STD in \"std\" \"\"; do\n        cargo test --features \"$EXP $STD\"\n    done\ndone\n\n"
  },
  {
    "path": "zstd-safe/zstd-sys/update_bindings.sh",
    "content": "#!/bin/sh\n\nRUST_TARGET=1.64\nbindgen=\"bindgen --no-layout-tests --blocklist-type=max_align_t --rustified-enum=.* --use-core --rust-target $RUST_TARGET\"\nexperimental=\"-DZSTD_STATIC_LINKING_ONLY -DZDICT_STATIC_LINKING_ONLY -DZSTD_RUST_BINDINGS_EXPERIMENTAL\"\n\nrun_bindgen()\n{\n        echo \"/*\nThis file is auto-generated from the public API of the zstd library.\nIt is released under the same BSD license.\n\n$(cat zstd/LICENSE)\n*/\"\n\n    $bindgen $@\n}\n\n    for EXPERIMENTAL_ARG in \"$experimental\" \"\"; do\n        if [ -z \"$EXPERIMENTAL_ARG\" ]; then EXPERIMENTAL=\"\"; else EXPERIMENTAL=\"_experimental\"; fi\n\n        SUFFIX=${EXPERIMENTAL}\n\n        run_bindgen zstd.h \\\n            --allowlist-type \"ZSTD_.*\" \\\n            --allowlist-function \"ZSTD_.*\" \\\n            --allowlist-var \"ZSTD_.*\" \\\n            -- -Izstd/lib $EXPERIMENTAL_ARG > src/bindings_zstd${SUFFIX}.rs\n\n        run_bindgen zdict.h \\\n            --allowlist-type \"ZDICT_.*\" \\\n            --allowlist-function \"ZDICT_.*\" \\\n            --allowlist-var \"ZDICT_.*\" \\\n            -- -Izstd/lib $EXPERIMENTAL_ARG > src/bindings_zdict${SUFFIX}.rs\n    done\n\n    # - ZSTD_seekable_initFile is blocked because it expects the c FILE type, rust files can directly be passed to init_advanced()\n    run_bindgen zstd_seekable.h --allowlist-file \".*zstd_seekable.h$\" --no-recursive-allowlist \\\n      --blocklist-function ZSTD_seekable_initFile \\\n      -- -Izstd/lib > src/bindings_zstd_seekable.rs\n"
  },
  {
    "path": "zstd-safe/zstd-sys/update_zstd.sh",
    "content": "#!/bin/bash\nset -e\nset -o pipefail\n\ncd zstd\nCURRENT=$(git describe --tags)\ngit fetch -q\nTAG=$(git tag -l | grep '^v1' | sort | tail -n 1)\n\nif [ $CURRENT != $TAG ]\nthen\n    git checkout $TAG\n    cd ..\n    git add zstd\n    ./update_bindings.sh\n    git add src/bindings*.rs\n    cd ..\n    ./update_consts.sh\n    git add src/constants*.rs\n    cd zstd-sys\n\n    # Note: You'll need a forked version of cargo-bump that supports metadata\n    # For instance https://github.com/gyscos/cargo-bump\n    METADATA=\"zstd.${TAG/v/}\"\n    cargo bump patch --build $METADATA\n    ZSTD_SYS_VERSION=$(cargo read-manifest | jq -r .version | cut -d+ -f1)\n    git add Cargo.toml\n    cd ..\n    cargo add zstd-sys --path ./zstd-sys --vers \"=${ZSTD_SYS_VERSION}\" --no-default-features\n    cargo bump patch --build $METADATA\n    ZSTD_SAFE_VERSION=$(cargo read-manifest | jq -r .version | cut -d+ -f1)\n    git add Cargo.toml\n    cd ..\n    cargo add zstd-safe --path ./zstd-safe --vers \"=${ZSTD_SAFE_VERSION}\" --no-default-features\n    cargo bump patch --build $METADATA\n    ZSTD_RS_VERSION=$(cargo read-manifest | jq -r .version | cut -d+ -f1)\n    git add Cargo.toml\n\n    cargo check\n\n    git commit -m \"Update zstd to $TAG\"\n\n    # Publish?\n    read -p \"Publish to crates.io? \" -n 1 -r\n    echo\n    if [[ $REPLY =~ ^[Yy]$ ]]\n    then\n        cd zstd-safe/zstd-sys\n        # Need to wait so that the index refreshes.\n        cargo publish && sleep 5\n        cd ..\n        cargo publish && sleep 5\n        cd ..\n        cargo publish\n        git tag $ZSTD_RS_VERSION\n    else\n        echo \"Would have published $ZSTD_RS_VERSION\"\n    fi\n\nelse\n    echo \"Already using zstd $TAG\"\nfi\n\n"
  },
  {
    "path": "zstd-safe/zstd-sys/wasm-shim/assert.h",
    "content": "#ifndef _ASSERT_H\n#define _ASSERT_H\n\n#define assert(expr)\n\n#endif // _ASSERT_H\n"
  },
  {
    "path": "zstd-safe/zstd-sys/wasm-shim/stdio.h",
    "content": "#include <stddef.h>\n\n#ifndef\t_STDIO_H\n#define\t_STDIO_H\t1\n\n#define fprintf(expr, ...)\n#define fflush(expr)\n\n#endif // _STDIO_H\n\n"
  },
  {
    "path": "zstd-safe/zstd-sys/wasm-shim/stdlib.h",
    "content": "#include <stddef.h>\n\n#ifndef _STDLIB_H\n#define _STDLIB_H 1\n\nvoid* rust_zstd_wasm_shim_malloc(size_t size);\nvoid* rust_zstd_wasm_shim_calloc(size_t nmemb, size_t size);\nvoid rust_zstd_wasm_shim_free(void* ptr);\nvoid rust_zstd_wasm_shim_qsort(void* base, size_t nitems, size_t size,\n                               int (*compar)(const void*, const void*));\n\n#define malloc(size) rust_zstd_wasm_shim_malloc(size)\n#define calloc(nmemb, size) rust_zstd_wasm_shim_calloc(nmemb, size)\n#define free(ptr) rust_zstd_wasm_shim_free(ptr)\n#define qsort(base, nitems, size, compar) \\\n  rust_zstd_wasm_shim_qsort(base, nitems, size, compar)\n\n#endif  // _STDLIB_H\n"
  },
  {
    "path": "zstd-safe/zstd-sys/wasm-shim/string.h",
    "content": "#include <stdlib.h>\n\n#ifndef\t_STRING_H\n#define\t_STRING_H\t1\n\nint rust_zstd_wasm_shim_memcmp(const void *str1, const void *str2, size_t n);\nvoid *rust_zstd_wasm_shim_memcpy(void *restrict dest, const void *restrict src, size_t n);\nvoid *rust_zstd_wasm_shim_memmove(void *dest, const void *src, size_t n);\nvoid *rust_zstd_wasm_shim_memset(void *dest, int c, size_t n);\n\ninline int memcmp(const void *str1, const void *str2, size_t n) {\n    return rust_zstd_wasm_shim_memcmp(str1, str2, n);\n}\n\ninline void *memcpy(void *restrict dest, const void *restrict src, size_t n) {\n\treturn rust_zstd_wasm_shim_memcpy(dest, src, n);\n}\n\ninline void *memmove(void *dest, const void *src, size_t n) {\n\treturn rust_zstd_wasm_shim_memmove(dest, src, n);\n}\n\ninline void *memset(void *dest, int c, size_t n) {\n\treturn rust_zstd_wasm_shim_memset(dest, c, n);\n}\n\n#endif // _STRING_H\n"
  },
  {
    "path": "zstd-safe/zstd-sys/wasm-shim/time.h",
    "content": "#ifndef _TIME_H\n#define _TIME_H\n\n#define CLOCKS_PER_SEC 1000\n\ntypedef unsigned long long clock_t;\n\n// Clock is just use for progress reporting, which we disable anyway.\ninline clock_t clock() {\n    return 0;\n}\n\n#endif // _TIME_H\n"
  },
  {
    "path": "zstd-safe/zstd-sys/zdict.h",
    "content": "#ifdef PKG_CONFIG\n\n/* Just use installed headers */\n#include <zdict.h>\n// Don't use experimental features like zstdmt\n\n#else // #ifdef PKG_CONFIG\n\n#include \"zstd/lib/zdict.h\"\n\n#endif // #ifdef PKG_CONFIG\n\n\n/* This file is used to generate bindings for both headers.\n * Check update_bindings.sh to see how to use it.\n * Or use the `bindgen` feature, which will create the bindings automatically. */\n\n"
  },
  {
    "path": "zstd-safe/zstd-sys/zstd.h",
    "content": "#ifdef PKG_CONFIG\n\n/* Just use installed headers */\n#include <zstd.h>\n#ifdef ZSTD_RUST_BINDINGS_EXPERIMENTAL\n#include <zstd_errors.h>\n#endif  // #ifdef ZSTD_RUST_BINDINGS_EXPERIMENTAL\n\n#else // #ifdef PKG_CONFIG\n\n#include \"zstd/lib/zstd.h\"\n#ifdef ZSTD_RUST_BINDINGS_EXPERIMENTAL\n#include \"zstd/lib/zstd_errors.h\"\n#endif // #ifdef ZSTD_RUST_BINDINGS_EXPERIMENTAL\n\n#endif // #ifdef PKG_CONFIG\n\n\n/* This file is used to generate bindings for both headers.\n * Check update_bindings.sh to see how to use it.\n * Or use the `bindgen` feature, which will create the bindings automatically. */\n"
  },
  {
    "path": "zstd-safe/zstd-sys/zstd_seekable.h",
    "content": "#ifdef PKG_CONFIG\n\n/* Just use installed headers */\n#include <zstd_seekable.h>\n// Don't use experimental features like zstdmt\n\n#else // #ifdef PKG_CONFIG\n\n#include \"zstd/contrib/seekable_format/zstd_seekable.h\"\n\n#endif // #ifdef PKG_CONFIG\n\n\n/* This file is used to generate bindings for the Zstandard Seekable Format.\n * Check update_bindings.sh to see how to use it.\n * Or use the `bindgen` feature, which will create the bindings automatically. */\n"
  }
]